Columns (string lengths):

  diff    41 .. 2.03M
  msg     1 .. 1.5k
  repo    5 .. 40
  sha     40 .. 40
  time    20 .. 20
diff:
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
if (LLVM_FOUND)
  add_definitions(${LLVM_DEFINITIONS})
  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DTLANG_WITH_LLVM")
  llvm_map_components_to_libnames(llvm_libs
-    Analysis
    Core
    ExecutionEngine
    InstCombine
-    Object
    OrcJIT
    RuntimeDyld
    ScalarOpts
-    IPO
    Support
    native
  )
--- a/examples/llvm.py
+++ b/examples/llvm.py

x, y = ti.var(ti.f32), ti.var(ti.f32)
z, w = ti.var(ti.f32), ti.var(ti.f32)
+val = ti.var(ti.f32)

ti.cfg.use_llvm = True
-ti.cfg.print_ir = True
-ti.runtime.print_preprocessed = True
+# ti.cfg.print_ir = True
+# ti.runtime.print_preprocessed = True

@ti.layout
def xy():
    fork = ti.root.dense(ti.k, 128)
    fork.dense(ti.ij, 16).place(x, y)
    fork.dense(ti.ijk, 4).dense(ti.i, 8).place(z, w)
+    ti.root.dense(ti.i, 128).place(val)
+
+val[0] = 123456
+print('val', val[0])

@ti.kernel
def test():
def test():
    for i in range(10):
        if i % 2 == 0:
            a += i
-            ti.print(a)
+    ti.print(a)

test()

--- a/src/backends/struct_llvm.cpp
+++ b/src/backends/struct_llvm.cpp
void StructCompilerLLVM::run(SNode &node) {
  // bottom to top
  compile(node);

-  Program *prog = &get_current_program();
+  /*
  auto var = new llvm::GlobalVariable(*module, llvm_types[&root], false,
                                      llvm::GlobalVariable::CommonLinkage, 0);
+  */

  // get corner coordinates
  /*
void StructCompilerLLVM::run(SNode &node) {
  }
  */

+  // TODO: general allocators
+  auto root_size =
+      tlctx->jit->getDataLayout().getTypeAllocSize(llvm_types[&root]);
+
+  creator = [=] {
+    TC_INFO("Allocating data structure of size {}", root_size);
+    return std::malloc(root_size);
+  };
+
  root_type = node.node_type_name;
  generate_leaf_accessors(node);

  TC_INFO("Struct Module IR");
  module->print(errs(), nullptr);

-  emit("#if defined(TC_STRUCT)");
-  emit("TC_EXPORT void *create_data_structure() {{");
-
-  emit("Managers::initialize();");
-
  TC_ASSERT((int)snodes.size() <= max_num_snodes);
  for (int i = 0; i < (int)snodes.size(); i++) {
    // if (snodes[i]->type == SNodeType::pointer ||
--- a/src/program.h
+++ b/src/program.h
class Program {
  Kernel *current_kernel;
  SNode *current_snode;
  SNode *snode_root;
+  // pointer to the data structure. assigned to context.buffers[0] during kernel
+  // launches
  void *data_structure;
  CompileConfig config;
  CPUProfiler cpu_profiler;
msg:  host data structure access
repo: taichi-dev/taichi
sha:  ac0f84130f696823da5f389d0db8bc0f7780f7e3
time: 2019-08-06T19:17:27Z

diff:
--- a/src/mongo/db/pipeline/expression.cpp
+++ b/src/mongo/db/pipeline/expression.cpp
namespace {

        verify(str::equals(expr.fieldName(), "$let"));

-        uassert(16874, "$let only supports an object as it's argument",
+        uassert(16874, "$let only supports an object as its argument",
                expr.type() == Object);
        const BSONObj args = expr.embeddedObject();
msg:  Fix grammar in $let agg expression error message.
repo: mongodb/mongo
sha:  c76d80bce0212fe9770ddb6876878eb16014769b
time: 2014-10-23T15:55:43Z

diff:
new file mode 100644
index 0000000000..834d9f1909
--- /dev/null
+++ b/code/graph-algorithms/graph_coloring/graph_coloring.java
+import java.util.*;
+
+public class Coloring {
+
+    int minColors;
+    int[] bestColoring;
+
+    public int minColors(boolean[][] graph) {
+        int n = graph.length;
+        bestColoring = new int[n];
+        int[] id = new int[n + 1];
+        int[] deg = new int[n + 1];
+        for (int i = 0; i <= n; i++)
+            id[i] = i;
+        bestColoring = new int[n];
+        int res = 1;
+        for (int from = 0, to = 1; to <= n; to++) {
+            int best = to;
+            for (int i = to; i < n; i++) {
+                if (graph[id[to - 1]][id[i]])
+                    ++deg[id[i]];
+                if (deg[id[best]] < deg[id[i]])
+                    best = i;
+            }
+            int t = id[to];
+            id[to] = id[best];
+            id[best] = t;
+            if (deg[id[to]] == 0) {
+                minColors = n + 1;
+                dfs(graph, id, new int[n], from, to, from, 0);
+                from = to;
+                res = Math.max(res, minColors);
+            }
+        }
+        return res;
+    }
+
+    void dfs(boolean[][] graph, int[] id, int[] coloring, int from, int to, int cur, int usedColors) {
+        if (usedColors >= minColors)
+            return;
+        if (cur == to) {
+            for (int i = from; i < to; i++)
+                bestColoring[id[i]] = coloring[i];
+            minColors = usedColors;
+            return;
+        }
+        boolean[] used = new boolean[usedColors + 1];
+        for (int i = 0; i < cur; i++)
+            if (graph[id[cur]][id[i]])
+                used[coloring[i]] = true;
+        for (int i = 0; i <= usedColors; i++) {
+            if (!used[i]) {
+                int tmp = coloring[cur];
+                coloring[cur] = i;
+                dfs(graph, id, coloring, from, to, cur + 1, Math.max(usedColors, i + 1));
+                coloring[cur] = tmp;
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        Random rnd = new Random(1);
+        for (int step = 0; step < 1000; step++) {
+            int n = rnd.nextInt(10) + 1;
+            boolean[][] g = new boolean[n][n];
+            for (int i = 0; i < n; i++)
+                for (int j = 0; j < i; j++)
+                    if (rnd.nextBoolean()) {
+                        g[i][j] = true;
+                        g[j][i] = true;
+                    }
+            int res1 = new Coloring().minColors(g);
+            int res2 = colorSlow(g);
+            if (res1 != res2)
+                throw new RuntimeException();
+        }
+    }
+
+    static int colorSlow(boolean[][] g) {
+        int n = g.length;
+        for (int allowedColors = 1; ; allowedColors++) {
+            long colors = 1;
+            for (int i = 0; i < n; i++)
+                colors *= allowedColors;
+            m1:
+            for (long c = 0; c < colors; c++) {
+                int[] col = new int[n];
+                long cur = c;
+                for (int i = 0; i < n; i++) {
+                    col[i] = (int) (cur % allowedColors);
+                    cur /= allowedColors;
+                }
+                for (int i = 0; i < n; i++)
+                    for (int j = 0; j < i; j++)
+                        if (g[i][j] && col[i] == col[j])
+                            continue m1;
+                return allowedColors;
+            }
+        }
+    }
+}
msg:  Merge pull request from cenaion/patch-37
repo: OpenGenus/cosmos
sha:  674080485ad1e4b994f5f882fc688d0137df0946
time: 2017-10-20T10:13:59Z

diff:
--- a/templates/tools/dockerfile/csharp_deps.include
+++ b/templates/tools/dockerfile/csharp_deps.include
RUN apt-get update && apt-get -y dist-upgrade && apt-get install -y ${'\\'}
    ca-certificates-mono ${'\\'}
    nuget ${'\\'}
    && apt-get clean
+
+RUN nuget update -self
msg:  update nuget after installing in docker
repo: grpc/grpc
sha:  82fd31ac35c38a81b15e25d61971a9811303ee79
time: 2016-08-03T06:51:43Z

diff:
--- a/Telegram/SourceFiles/history/history_inner_widget.cpp
+++ b/Telegram/SourceFiles/history/history_inner_widget.cpp
void HistoryInner::paintEvent(QPaintEvent *e) {
        p.setTextPalette(st::inTextPalette);
        App::roundRect(p, _botAbout->rect, st::msgInBg, MessageInCorners, &st::msgInShadow);

-        p.setFont(st::msgNameFont);
-        p.setPen(st::dialogsNameFg);
-        p.drawText(_botAbout->rect.left() + st::msgPadding.left(), _botAbout->rect.top() + st::msgPadding.top() + st::msgNameFont->ascent, tr::lng_bot_description(tr::now));
+        auto top = _botAbout->rect.top() + st::msgPadding.top();
+        if (!_history->peer->isRepliesChat()) {
+            p.setFont(st::msgNameFont);
+            p.setPen(st::dialogsNameFg);
+            p.drawText(_botAbout->rect.left() + st::msgPadding.left(), top + st::msgNameFont->ascent, tr::lng_bot_description(tr::now));
+            top += +st::msgNameFont->height + st::botDescSkip;
+        }

        p.setPen(st::historyTextInFg);
-        _botAbout->info->text.draw(p, _botAbout->rect.left() + st::msgPadding.left(), _botAbout->rect.top() + st::msgPadding.top() + st::msgNameFont->height + st::botDescSkip, _botAbout->width);
+        _botAbout->info->text.draw(p, _botAbout->rect.left() + st::msgPadding.left(), top, _botAbout->width);

        p.restoreTextPalette();
    }
void HistoryInner::recountHistoryGeometry() {
    int32 tw = _scroll->width() - st::msgMargin.left() - st::msgMargin.right();
    if (tw > st::msgMaxWidth) tw = st::msgMaxWidth;
    tw -= st::msgPadding.left() + st::msgPadding.right();
-    int32 mw = qMax(_botAbout->info->text.maxWidth(), st::msgNameFont->width(tr::lng_bot_description(tr::now)));
+    const auto descriptionWidth = _history->peer->isRepliesChat()
+        ? 0
+        : st::msgNameFont->width(tr::lng_bot_description(tr::now));
+    int32 mw = qMax(_botAbout->info->text.maxWidth(), descriptionWidth);
    if (tw > mw) tw = mw;

    _botAbout->width = tw;
    _botAbout->height = _botAbout->info->text.countHeight(_botAbout->width);

-    int32 descH = st::msgMargin.top() + st::msgPadding.top() + st::msgNameFont->height + st::botDescSkip + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
+    const auto descriptionHeight = _history->peer->isRepliesChat()
+        ? 0
+        : (st::msgNameFont->height + st::botDescSkip);
+    int32 descH = st::msgMargin.top() + st::msgPadding.top() + descriptionHeight + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
    int32 descMaxWidth = _scroll->width();
    if (Core::App().settings().chatWide()) {
        descMaxWidth = qMin(descMaxWidth, int32(st::msgMaxWidth + 2 * st::msgPhotoSkip + 2 * st::msgMargin.left()));
    }
void HistoryInner::updateBotInfo(bool recount) {
    int32 tw = _scroll->width() - st::msgMargin.left() - st::msgMargin.right();
    if (tw > st::msgMaxWidth) tw = st::msgMaxWidth;
    tw -= st::msgPadding.left() + st::msgPadding.right();
-    int32 mw = qMax(_botAbout->info->text.maxWidth(), st::msgNameFont->width(tr::lng_bot_description(tr::now)));
+    const auto descriptionWidth = _history->peer->isRepliesChat()
+        ? 0
+        : st::msgNameFont->width(tr::lng_bot_description(tr::now));
+    int32 mw = qMax(_botAbout->info->text.maxWidth(), descriptionWidth);
    if (tw > mw) tw = mw;

    _botAbout->width = tw;
void HistoryInner::updateBotInfo(bool recount) {
        updateSize();
    }
    if (_botAbout->height > 0) {
-        int32 descH = st::msgMargin.top() + st::msgPadding.top() + st::msgNameFont->height + st::botDescSkip + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
+        const auto descriptionHeight = _history->peer->isRepliesChat()
+            ? 0
+            : (st::msgNameFont->height + st::botDescSkip);
+        int32 descH = st::msgMargin.top() + st::msgPadding.top() + descriptionHeight + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
        int32 descAtX = (_scroll->width() - _botAbout->width) / 2 - st::msgPadding.left();
        int32 descAtY = qMin(_historyPaddingTop - descH, (_scroll->height() - descH) / 2) + st::msgMargin.top();

void HistoryInner::updateSize() {
    }

    if (_botAbout && _botAbout->height > 0) {
-        int32 descH = st::msgMargin.top() + st::msgPadding.top() + st::msgNameFont->height + st::botDescSkip + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
+        const auto descriptionHeight = _history->peer->isRepliesChat()
+            ? 0
+            : (st::msgNameFont->height + st::botDescSkip);
+        int32 descH = st::msgMargin.top() + st::msgPadding.top() + descriptionHeight + _botAbout->height + st::msgPadding.bottom() + st::msgMargin.bottom();
        int32 descMaxWidth = _scroll->width();
        if (Core::App().settings().chatWide()) {
            descMaxWidth = qMin(descMaxWidth, int32(st::msgMaxWidth + 2 * st::msgPhotoSkip + 2 * st::msgMargin.left()));
        }
msg:  Hide bot about header for Replies chat.
repo: telegramdesktop/tdesktop
sha:  d642c3f3b5f94f3eb32ad476d26258cd73cfdf4c
time: 2020-10-01T08:19:14Z

diff:
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/lite_parsed_query.cpp
namespace mongo {

        _hasReadPref = queryObj.hasField("$readPreference");

-        if (!isValidSortOrder(_sort)) {
-            return Status(ErrorCodes::BadValue, "bad sort specification");
+        if (!_sort.isEmpty()) {
+            if (!isValidSortOrder(_sort)) {
+                return Status(ErrorCodes::BadValue, "bad sort specification");
+            }
+            _sort = normalizeSortOrder(_sort);
        }
-        _sort = normalizeSortOrder(_sort);

        // Min and Max objects must have the same fields.
        if (!_min.isEmpty() && !_max.isEmpty()) {
msg:  SERVER-10159 Don't validate and normalize an empty sort
repo: mongodb/mongo
sha:  b71af1bc88e0ee2bab90d1f2a7400dc65649e087
time: 2014-01-02T18:00:27Z

diff:
--- a/src/v8memory.h
+++ b/src/v8memory.h

#ifndef V8_V8MEMORY_H_
#define V8_V8MEMORY_H_

+#include "src/globals.h"
+
namespace v8 {
namespace internal {

--- a/tools/generate-header-include-checks.py
+++ b/tools/generate-header-include-checks.py

  'src/snapshot/object-deserializer.h',
  'src/third_party/utf8-decoder/utf8-decoder.h',
  'src/transitions.h',
-  'src/v8memory.h',
]
AUTO_EXCLUDE_PATTERNS = [
  'src/base/atomicops_internals_.*',
msg:  [iwyu] Fix includes in v8memory.h
repo: v8/v8
sha:  d065807971fd56c9fe82559efd1eb320ad555b75
time: 2018-08-01T10:30:35Z

diff:
--- a/modules/ml/include/opencv2/ml.hpp
+++ b/modules/ml/include/opencv2/ml.hpp
class CV_EXPORTS_W StatModel : public Algorithm
    }

    /** @brief Loads model from an XML String
-
    @param strModel The string variable containing the model (in an XML format) you want to load.

    This is static template method of StatModel. It's usage is following (in the case of SVM):
class CV_EXPORTS_W StatModel : public Algorithm
        model->read(fs.getFirstTopLevelNode());
        return model->isTrained() ? model : Ptr<_Tp>();
    }
-
+
    template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, const typename _Tp::Params& p, int flags=0)
    {
        Ptr<_Tp> model = _Tp::create(p);
msg:  Re-edition of documentation
repo: opencv/opencv
sha:  f54b80d2c84bc825384d6e29bf38daba511d6a60
time: 2014-12-03T16:13:11Z

diff:
--- a/include/mlir/IR/OpBase.td
+++ b/include/mlir/IR/OpBase.td
def SameOperandsShape : NativeOpTrait<"SameOperandsShape">;
def SameOperandsAndResultShape : NativeOpTrait<"SameOperandsAndResultShape">;
// Op has the same operand and result type.
def SameOperandsAndResultType : NativeOpTrait<"SameOperandsAndResultType">;
-// Op has the same element type for all operands.
+// Op has the same element type (or type itself, if scalar) for all operands.
def SameOperandsElementType : NativeOpTrait<"SameOperandsElementType">;
-// Op has the same operand and result element type.
+// Op has the same operand and result element type (or type itself, if scalar).
def SameOperandsAndResultElementType :
    NativeOpTrait<"SameOperandsAndResultElementType">;
// Op is a terminator.
--- a/include/mlir/IR/OpDefinition.h
+++ b/include/mlir/IR/OpDefinition.h
class SameOperandsAndResultShape
};

/// This class provides verification for ops that are known to have the same
-/// operand element type.
+/// operand element type (or the type itself if it is scalar).
///
template <typename ConcreteType>
class SameOperandsElementType
class SameOperandsElementType
};

/// This class provides verification for ops that are known to have the same
-/// operand and result element type.
+/// operand and result element type (or the type itself if it is scalar).
///
template <typename ConcreteType>
class SameOperandsAndResultElementType
--- a/lib/IR/Operation.cpp
+++ b/lib/IR/Operation.cpp

#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
+#include "mlir/IR/TypeUtilities.h"
#include <numeric>
+
using namespace mlir;

/// Form the OperationName for an op with the specified string. This either is
LogicalResult OpTrait::impl::verifySameOperandsAndResultShape(Operation *op) {
LogicalResult OpTrait::impl::verifySameOperandsElementType(Operation *op) {
  if (failed(verifyAtLeastNOperands(op, 1)))
    return failure();
+  auto elementType = getElementTypeOrSelf(op->getOperand(0));

-  auto type = op->getOperand(0)->getType().dyn_cast<ShapedType>();
-  if (!type)
-    return op->emitOpError("requires shaped type results");
-  auto elementType = type.getElementType();
-
-  for (auto operandType : llvm::drop_begin(op->getOperandTypes(), 1)) {
-    auto shapedType = operandType.dyn_cast<ShapedType>();
-    if (!shapedType)
-      return op->emitOpError("requires shaped type operands");
-    if (shapedType.getElementType() != elementType)
+  for (auto operand : llvm::drop_begin(op->getOperands(), 1)) {
+    if (getElementTypeOrSelf(operand) != elementType)
      return op->emitOpError("requires the same element type for all operands");
  }

OpTrait::impl::verifySameOperandsAndResultElementType(Operation *op) {
      failed(verifyAtLeastNResults(op, 1)))
    return failure();

-  auto type = op->getResult(0)->getType().dyn_cast<ShapedType>();
-  if (!type)
-    return op->emitOpError("requires shaped type results");
-  auto elementType = type.getElementType();
+  auto elementType = getElementTypeOrSelf(op->getResult(0));

  // Verify result element type matches first result's element type.
  for (auto result : drop_begin(op->getResults(), 1)) {
-    auto resultType = result->getType().dyn_cast<ShapedType>();
-    if (!resultType)
-      return op->emitOpError("requires shaped type results");
-    if (resultType.getElementType() != elementType)
+    if (getElementTypeOrSelf(result) != elementType)
      return op->emitOpError(
          "requires the same element type for all operands and results");
  }

  // Verify operand's element type matches first result's element type.
  for (auto operand : op->getOperands()) {
-    auto operandType = operand->getType().dyn_cast<ShapedType>();
-    if (!operandType)
-      return op->emitOpError("requires shaped type operands");
-    if (operandType.getElementType() != elementType)
+    if (getElementTypeOrSelf(operand) != elementType)
      return op->emitOpError(
          "requires the same element type for all operands and results");
  }
--- a/test/IR/traits.mlir
+++ b/test/IR/traits.mlir

// RUN: mlir-opt %s -split-input-file -verify-diagnostics | FileCheck %s

// CHECK: succeededSameOperandsElementType
-func @succeededSameOperandsElementType(%t10x10 : tensor<10x10xf32>, %t1f : tensor<1xf32>, %v1 : vector<1xf32>, %t1i : tensor<1xi32>) {
+func @succeededSameOperandsElementType(%t10x10 : tensor<10x10xf32>, %t1f : tensor<1xf32>, %v1 : vector<1xf32>, %t1i : tensor<1xi32>, %sf : f32) {
  %0 = "test.same_operand_element_type"(%t1f, %t1f) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xi32>
  %1 = "test.same_operand_element_type"(%t1f, %t10x10) : (tensor<1xf32>, tensor<10x10xf32>) -> tensor<1xi32>
  %2 = "test.same_operand_element_type"(%t10x10, %v1) : (tensor<10x10xf32>, vector<1xf32>) -> tensor<1xi32>
  %3 = "test.same_operand_element_type"(%v1, %t1f) : (vector<1xf32>, tensor<1xf32>) -> tensor<1xi32>
  %4 = "test.same_operand_element_type"(%v1, %t1f) : (vector<1xf32>, tensor<1xf32>) -> tensor<121xi32>
+  %5 = "test.same_operand_element_type"(%sf, %sf) : (f32, f32) -> i32
+  %6 = "test.same_operand_element_type"(%sf, %t1f) : (f32, tensor<1xf32>) -> tensor<121xi32>
+  %7 = "test.same_operand_element_type"(%sf, %v1) : (f32, vector<1xf32>) -> tensor<121xi32>
+  %8 = "test.same_operand_element_type"(%sf, %t10x10) : (f32, tensor<10x10xf32>) -> tensor<121xi32>
  return
}

func @failedSameOperandAndResultElementType_no_operands() {

// -----

+func @failedSameOperandElementType_scalar_type_mismatch(%si : i32, %sf : f32) {
+  // expected-error @+1 {{requires the same element type for all operands}}
+  %0 = "test.same_operand_element_type"(%sf, %si) : (f32, i32) -> tensor<1xf32>
+}
+
+// -----
+
// CHECK: succeededSameOperandAndResultElementType
-func @succeededSameOperandAndResultElementType(%t10x10 : tensor<10x10xf32>, %t1f : tensor<1xf32>, %v1 : vector<1xf32>, %t1i : tensor<1xi32>) {
+func @succeededSameOperandAndResultElementType(%t10x10 : tensor<10x10xf32>, %t1f : tensor<1xf32>, %v1 : vector<1xf32>, %t1i : tensor<1xi32>, %sf : f32) {
  %0 = "test.same_operand_and_result_element_type"(%t1f, %t1f) : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
  %1 = "test.same_operand_and_result_element_type"(%t1f, %t10x10) : (tensor<1xf32>, tensor<10x10xf32>) -> tensor<1xf32>
  %2 = "test.same_operand_and_result_element_type"(%t10x10, %v1) : (tensor<10x10xf32>, vector<1xf32>) -> tensor<1xf32>
  %3 = "test.same_operand_and_result_element_type"(%v1, %t1f) : (vector<1xf32>, tensor<1xf32>) -> tensor<1xf32>
  %4 = "test.same_operand_and_result_element_type"(%v1, %t1f) : (vector<1xf32>, tensor<1xf32>) -> tensor<121xf32>
+  %5 = "test.same_operand_and_result_element_type"(%sf, %sf) : (f32, f32) -> f32
+  %6 = "test.same_operand_and_result_element_type"(%sf, %t1f) : (f32, tensor<1xf32>) -> tensor<121xf32>
+  %7 = "test.same_operand_and_result_element_type"(%sf, %v1) : (f32, vector<1xf32>) -> tensor<121xf32>
+  %8 = "test.same_operand_and_result_element_type"(%sf, %t10x10) : (f32, tensor<10x10xf32>) -> tensor<121xf32>
  return
}

func @failedSameOperandAndResultElementType_operand_mismatch(%t1f : tensor<1xf32>

// -----

+func @failedSameOperandAndResultElementType_result_mismatch(%t1f : tensor<1xf32>) {
+  // expected-error @+1 {{requires the same element type for all operands and results}}
+  %0:2 = "test.same_operand_and_result_element_type"(%t1f) : (tensor<1xf32>) -> (tensor<1xf32>, tensor<1xi32>)
+}
+
+// -----
+
func @failedSameOperandAndResultElementType_no_operands() {
  // expected-error @+1 {{expected 1 or more operands}}
  %0 = "test.same_operand_and_result_element_type"() : () -> tensor<1xf32>
--- a/test/lib/TestDialect/TestOps.td
+++ b/test/lib/TestDialect/TestOps.td
def FunctionalRegionOp : TEST_Op<"functional_region_op",

def SameOperandElementTypeOp : TEST_Op<"same_operand_element_type",
    [SameOperandsElementType]> {
-  let arguments = (ins AnyVectorOrTensor, AnyVectorOrTensor);
-  let results = (outs AnyVectorOrTensor);
+  let arguments = (ins AnyType, AnyType);
+  let results = (outs AnyType);
}

def SameOperandAndResultElementTypeOp : TEST_Op<"same_operand_and_result_element_type",
    [SameOperandsAndResultElementType]> {
-  let arguments = (ins Variadic<AnyVectorOrTensor>);
-  let results = (outs Variadic<AnyVectorOrTensor>);
+  let arguments = (ins Variadic<AnyType>);
+  let results = (outs Variadic<AnyType>);
}

def SameOperandShapeOp : TEST_Op<"same_operand_shape", [SameOperandsShape]> {
msg:  Allow element type traits to operate on scalars
repo: tensorflow/tensorflow
sha:  cb1df5ee2fd7166618a9cc2ae69b5836a2db04a0
time: 2019-10-05T17:06:06Z

diff:
--- a/include/spdlog/tweakme.h
+++ b/include/spdlog/tweakme.h

// This will prevent spdlog from querying the thread id on each log call.
//
// WARNING: If the log pattern contains thread id (i.e, %t) while this flag is
-// on, the result is undefined.
+// on, zero will be logged as thread id.
//
// #define SPDLOG_NO_THREAD_ID
///////////////////////////////////////////////////////////////////////////////
msg:  Fix issue
repo: gabime/spdlog
sha:  dd38e096b2ae316bb98eb24c0ce65652f1dfe261
time: 2020-03-24T00:15:14Z

diff:
--- a/src/core/lib/security/transport/client_auth_filter.c
+++ b/src/core/lib/security/transport/client_auth_filter.c
static void bubble_up_error(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
  grpc_call_next_op(exec_ctx, elem, &calld->op);
}

-static void add_error(grpc_error **combined, grpc_error *error) { abort(); }
+static void add_error(grpc_error **combined, grpc_error *error) {
+  if (error == GRPC_ERROR_NONE) return;
+  if (*combined == GRPC_ERROR_NONE) {
+    *combined = GRPC_ERROR_CREATE("Client auth metadata plugin error");
+  }
+  *combined = grpc_error_add_child(*combined, error);
+}

static void on_credentials_metadata(grpc_exec_ctx *exec_ctx, void *user_data,
                                    grpc_credentials_md *md_elems,
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
static grpc_metadata_array metadata_batch_to_md_array(
  return result;
}

-#if 0
-static grpc_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, void *user_data,
-                                      grpc_mdelem md) {
+static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx,
+                                               void *user_data,
+                                               grpc_mdelem md) {
  grpc_call_element *elem = user_data;
  call_data *calld = elem->call_data;
  size_t i;
static grpc_mdelem remove_consumed_md(grpc_exec_ctx *exec_ctx, void *user_data,
    const grpc_metadata *consumed_md = &calld->consumed_md[i];
    if (grpc_slice_eq(GRPC_MDKEY(md), consumed_md->key) &&
        grpc_slice_eq(GRPC_MDVALUE(md), consumed_md->value))
-      return GRPC_MDNULL;
+      return GRPC_FILTERED_REMOVE();
  }
-  return md;
+  return GRPC_FILTERED_MDELEM(md);
}
-#endif

static void destroy_op(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_free(arg);
static void on_md_processing_done(
  if (status == GRPC_STATUS_OK) {
    calld->consumed_md = consumed_md;
    calld->num_consumed_md = num_consumed_md;
-#if 0
-    grpc_metadata_batch_filter(&exec_ctx, calld->recv_initial_metadata,
-                               remove_consumed_md, elem);
-#else
-    if (num_consumed_md) abort();
-#endif
+    /* TODO(ctiller): propagate error */
+    GRPC_LOG_IF_ERROR(
+        "grpc_metadata_batch_filter",
+        grpc_metadata_batch_filter(&exec_ctx, calld->recv_initial_metadata,
+                                   remove_consumed_md, elem,
+                                   "Response metadata filtering error"));
    grpc_metadata_array_destroy(&calld->md);
    grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
  } else {
--- a/src/core/lib/transport/metadata_batch.c
+++ b/src/core/lib/transport/metadata_batch.c
size_t grpc_metadata_batch_size(grpc_metadata_batch *batch) {
  }
  return size;
}
+
+static void add_error(grpc_error **composite, grpc_error *error,
+                      const char *composite_error_string) {
+  if (error == GRPC_ERROR_NONE) return;
+  if (*composite == GRPC_ERROR_NONE) {
+    *composite = GRPC_ERROR_CREATE(composite_error_string);
+  }
+  *composite = grpc_error_add_child(*composite, error);
+}
+
+grpc_error *grpc_metadata_batch_filter(grpc_exec_ctx *exec_ctx,
+                                       grpc_metadata_batch *batch,
+                                       grpc_metadata_batch_filter_func func,
+                                       void *user_data,
+                                       const char *composite_error_string) {
+  grpc_linked_mdelem *l = batch->list.head;
+  grpc_error *error = GRPC_ERROR_NONE;
+  while (l) {
+    grpc_linked_mdelem *next = l->next;
+    grpc_filtered_mdelem new = func(exec_ctx, user_data, l->md);
+    add_error(&error, new.error, composite_error_string);
+    if (GRPC_MDISNULL(new.md)) {
+      grpc_metadata_batch_remove(exec_ctx, batch, l);
+    } else if (new.md.payload != l->md.payload) {
+      grpc_metadata_batch_substitute(exec_ctx, batch, l, new.md);
+    }
+    l = next;
+  }
+  return error;
+}
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
grpc_error *grpc_metadata_batch_add_tail(

grpc_error *grpc_attach_md_to_error(grpc_error *src, grpc_mdelem md);

+typedef struct {
+  grpc_error *error;
+  grpc_mdelem md;
+} grpc_filtered_mdelem;
+
+#define GRPC_FILTERED_ERROR(error) \
+  ((grpc_filtered_mdelem){(error), GRPC_MDNULL})
+#define GRPC_FILTERED_MDELEM(md) ((grpc_filtered_mdelem){GRPC_ERROR_NONE, (md)})
+#define GRPC_FILTERED_REMOVE() \
+  ((grpc_filtered_mdelem){GRPC_ERROR_NONE, GRPC_MDNULL})
+
+typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)(
+    grpc_exec_ctx *exec_ctx, void *user_data, grpc_mdelem elem);
+grpc_error *grpc_metadata_batch_filter(
+    grpc_exec_ctx *exec_ctx, grpc_metadata_batch *batch,
+    grpc_metadata_batch_filter_func func, void *user_data,
+    const char *composite_error_string) GRPC_MUST_USE_RESULT;
+
#ifndef NDEBUG
void grpc_metadata_batch_assert_ok(grpc_metadata_batch *comd);
#else
msg:  Fix some auth filtering bugs
repo: grpc/grpc
sha:  e150fff57ef947998ce8f45da112ed45ae01bacb
time: 2016-11-23T22:47:11Z

diff:
--- a/src/doc/algorithm/polygon.cpp
+++ b/src/doc/algorithm/polygon.cpp

#include "gfx/point.h"

+#include <algorithm>
#include <vector>

namespace doc {
msg:  Add missing <algorithm> header to use std::sort()
repo: aseprite/aseprite
sha:  8aaf679406fca3922cb69b06f4509f05f9a8d9e7
time: 2019-05-31T17:10:56Z

diff:
--- a/js/server/modules/@arangodb/foxx/router/router.js
+++ b/js/server/modules/@arangodb/foxx/router/router.js
const Router = module.exports =
      [['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']],
      [['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])],
      [...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']],
-      repeat(Math.max(1, args.length - 1), ['handler', 'function'])
+      repeat(Math.max(1, args.length), ['handler', 'function'])
    );
    const path = argv.path;
    const handler = argv.handler;
ALL_METHODS.forEach(function (method) {
      [['path', 'string'], ...repeat(Math.max(1, args.length - 2), ['handler', 'function']), ['name', 'string']],
      [['path', 'string'], ...repeat(Math.max(1, args.length - 1), ['handler', 'function'])],
      [...repeat(Math.max(1, args.length - 1), ['handler', 'function']), ['name', 'string']],
-      repeat(Math.max(1, args.length - 1), ['handler', 'function'])
+      repeat(Math.max(1, args.length), ['handler', 'function'])
    );
    const path = argv.path;
    const handler = argv.handler;
msg:  Fix off-by-one in Foxx route
repo: arangodb/arangodb
sha:  99ba80a54220d08c13c83cc23316ec664ccf9f55
time: 2016-12-20T00:43:14Z

diff:
--- a/dlib/svm/structural_track_association_trainer.h
+++ b/dlib/svm/structural_track_association_trainer.h
namespace dlib
                if (samples[i][j].size() > 0)
                {
                    track_type new_track;
+                    new_track.update_track(samples[i][j][0].first);
                    typename track_type::feature_vector_type feats;
                    new_track.get_similarity_features(samples[i][j][0].first, feats);
                    return feats.size();
--- a/dlib/svm/track_association_function_abstract.h
+++ b/dlib/svm/track_association_function_abstract.h
namespace dlib
            feature_vector_type& feats
        ) const;
        /*!
+            requires
+                - update_track() has been called on this track at least once.
            ensures
                - #feats == A feature vector that contains information describing how
                  likely it is that det is a detection from the object corresponding to
msg:  Added a guarantee that tracks won't be asked for their feature vector until
repo: davisking/dlib
sha:  a7d236c04e0f4bdc09dfc4fc901292178b596984
time: 2014-02-21T00:40:17Z

diff:
--- a/Tests/UnitTests/MathPerformanceTests/MathPerformanceTests.vcxproj
+++ b/Tests/UnitTests/MathPerformanceTests/MathPerformanceTests.vcxproj

    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>Unicode</CharacterSet>
  </PropertyGroup>
+  <Choose>
+    <When Condition="Exists('$(BOOST_INCLUDE_PATH)') And Exists('$(BOOST_LIB_PATH)')">
+      <PropertyGroup>
+        <HasBoost>true</HasBoost>
+      </PropertyGroup>
+    </When>
+    <Otherwise>
+      <PropertyGroup>
+        <HasBoost>false</HasBoost>
+      </PropertyGroup>
+    </Otherwise>
+  </Choose>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="PropertySheets">
    <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />

  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
    <LinkIncremental>$(DebugBuild)</LinkIncremental>
+    <OutDir>$(OutDir)\UnitTests\</OutDir>
  </PropertyGroup>
  <ItemDefinitionGroup>
    <ClCompile>
-      <AdditionalIncludeDirectories>$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <AdditionalIncludeDirectories>$(BOOST_INCLUDE_PATH);$(SolutionDir)Source\Math;$(SolutionDir)Source\Common\Include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+      <DisableSpecificWarnings>4819</DisableSpecificWarnings>
    </ClCompile>
    <Link>
-      <AdditionalLibraryDirectories>$(OutDir)</AdditionalLibraryDirectories>
+      <AdditionalLibraryDirectories>$(OutDir)..;$(BOOST_LIB_PATH)</AdditionalLibraryDirectories>
    </Link>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="$(DebugBuild)">

      <FunctionLevelLinking>true</FunctionLevelLinking>
      <IntrinsicFunctions>true</IntrinsicFunctions>
      <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <UseFullPaths>true</UseFullPaths>
      <SDLCheck>true</SDLCheck>
      <OpenMPSupport>true</OpenMPSupport>
      <TreatWarningAsError>true</TreatWarningAsError>

    <ClCompile>
      <AdditionalIncludeDirectories>$(CudaToolkitIncludeDir);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
    </ClCompile>
+    <Link>
+      <AdditionalLibraryDirectories>%(AdditionalLibraryDirectories);$(CudaLibPath)</AdditionalLibraryDirectories>
+    </Link>
  </ItemDefinitionGroup>
  <ImportGroup Condition="$(GpuBuild)" Label="ExtensionSettings">
    <Import Project="$(VCTargetsPath)\BuildCustomizations\CUDA $(CudaVersion).props" />

  <ItemGroup>
    <ClCompile Include="..\..\..\Source\Common\ExceptionWithCallStack.cpp" />
    <ClCompile Include="MathPerformanceTests.cpp" />
+    <ClCompile Include="PerformanceTests.cpp" />
    <ClCompile Include="stdafx.cpp">
      <PrecompiledHeader>Create</PrecompiledHeader>
    </ClCompile>
  </ItemGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+  <Target Name="Build" Condition="$(HasBoost)" Outputs="$(TargetPath)" DependsOnTargets="$(BuildDependsOn)" />
+  <Target Name="CheckDependencies">
+    <Warning Condition="!$(HasBoost)" Text="MathTests requires the Boost library to build. Please see https://github.com/Microsoft/CNTK/wiki/Setup-CNTK-on-Windows#boost for installation instructions." />
+  </Target>
</Project>
\ No newline at end of file
new file mode 100644
index 00000000000..bac719f1218
--- /dev/null
+++ b/Tests/UnitTests/MathPerformanceTests/PerformanceTests.cpp
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
+//
+// Performance unit tests should go here
+//
+
+#include "stdafx.h"
+#include "Matrix.h"
+#include "CPUMatrix.h"
+#include "TensorView.h"
+#include "Sequences.h"
+#include <chrono>
+#include <iostream>
+#include <vector>
+#include <algorithm>
+
+using namespace Microsoft::MSR::CNTK;
+
+namespace Microsoft { namespace MSR { namespace CNTK { namespace Test {
+
+BOOST_AUTO_TEST_SUITE(MathPerformance)
+
+template <typename FN>
+struct TensorTestParameters {
+    const char* testString;
+    double tolerance;
+    const FN& fn;
+};
+
+template <typename FN>
+class TensorTest {
+public:
+    void OneTensorTest(const char* what, double tolerance, const FN& fn)
+    {
+        cout << "===== Tensor test '" << what << "'\n";
+
+        // run on GPU and CPU
+        let resultGPU = fn(0);
+        let resultCPU = fn(-1);
+
+        // dump top corner of the result to get a feel for the error
+        resultGPU.GetSOB().Print("GPU result", 0, 7, 0, 9);
+        resultGPU.GetSOB().TransferToDeviceIfNotThere(-1, true, false, true);
+        resultCPU.GetSOB().Print("CPU result", 0, 7, 0, 9);
+
+        // compare
+        let isSame = resultGPU.GetSOB().IsEqualTo(resultCPU.GetSOB(), (ElemType) tolerance);
+        BOOST_CHECK(isSame);
+    }
+};
+
+TensorTestParameters<TensorView<float>> parameters[];
+
+TensorTest<float> tester;
+
+bool InitUnitTest()
+{
+    return false;
+}
+
+BOOST_AUTO_TEST_SUITE_END()
+}
+} } }
\ No newline at end of file
--- a/Tests/UnitTests/MathPerformanceTests/stdafx.h
+++ b/Tests/UnitTests/MathPerformanceTests/stdafx.h

#define _CRT_SECURE_NO_WARNINGS // "secure" CRT not available on all platforms
#include "targetver.h"

+#include <boost/test/unit_test.hpp>
#include <stdio.h>

// TODO: reference additional headers your program requires here
msg:  Add Boost support for tests
repo: microsoft/CNTK
sha:  555b17c4bc71a458ddb0ebc6ba0eef45b6d123a5
time: 2016-07-18T11:25:45Z

diff:
--- a/test/test_torch.py
+++ b/test/test_torch.py
def test_pickle_parameter_no_requires_grad(self):
        self.assertEqual(a.requires_grad, b.requires_grad)
        self.assertEqual(a, b)

+    def test_pickle_dtype(self):
+        t = torch.float32
+        serialized = pickle.dumps(t)
+        b = pickle.loads(serialized)
+        self.assertTrue(isinstance(b, torch.dtype))
+        self.assertEqual(id(b), id(t))
+
    def test_norm_fastpaths(self):
        x = torch.randn(3, 5)

--- a/torch/csrc/Dtype.cpp
+++ b/torch/csrc/Dtype.cpp
static PyMethodDef THPDtype_methods[] = {

PyObject *THPDtype_repr(THPDtype *self)
{
-  return THPUtils_packString(self->name);
+  std::string name = self->name;
+  return THPUtils_packString("torch." + name);
}

PyTypeObject THPDtypeType = {
--- a/torch/csrc/utils/tensor_dtypes.cpp
+++ b/torch/csrc/utils/tensor_dtypes.cpp
void initializeDtypes() {
  for (at::ScalarType scalarType : all_scalar_types) {
    std::string primary_name, legacy_name;
    std::tie(primary_name, legacy_name) = getDtypeNames(scalarType);
-    std::string name =
-        std::string(PyModule_GetName(torch_module.get())) + '.' + primary_name;
-    PyObject *dtype = THPDtype_New(scalarType, name);
+    PyObject *dtype = THPDtype_New(scalarType, primary_name);
    torch::registerDtypeObject((THPDtype *)dtype, scalarType);
    Py_INCREF(dtype);
    if (PyModule_AddObject(torch_module.get(), primary_name.c_str(), dtype) !=
msg:  Fix pickling torch.float32()
repo: pytorch/pytorch
sha:  88f70a16708369ad5d179fbe515f43739c0f2591
time: 2019-04-18T19:28:10Z

diff:
--- a/src/rdb_protocol/btree.cc
+++ b/src/rdb_protocol/btree.cc
class result_finalizer_visitor_t : public boost::static_visitor<void> {
};

void rdb_rget_slice(btree_slice_t *slice, const key_range_t &range,
-#if !SLICE_ALT
-                    transaction_t *txn,
-#endif
                    superblock_t *superblock,
                    ql::env_t *ql_env, const ql::batchspec_t &batchspec,
                    const rdb_protocol_details::transform_t &transform,
void rdb_rget_slice(btree_slice_t *slice, const key_range_t &range,
                    sorting_t sorting,
                    rget_read_response_t *response) {
    profile::starter_t starter("Do range scan on primary index.", ql_env->trace);
-#if SLICE_ALT
    rdb_rget_depth_first_traversal_callback_t callback(
        ql_env, batchspec, transform, terminal, range, sorting, response, slice);
    btree_concurrent_traversal(slice, superblock, range, &callback,
                               (!reversed(sorting) ? FORWARD : BACKWARD));
-#else
-    rdb_rget_depth_first_traversal_callback_t callback(
-        txn, ql_env, batchspec, transform, terminal, range, sorting, response, slice);
-    btree_concurrent_traversal(slice, txn, superblock, range, &callback,
-                               (!reversed(sorting) ? FORWARD : BACKWARD));
-#endif

    response->truncated = callback.batcher.should_send_batch();

void rdb_rget_secondary_slice(
    btree_slice_t *slice,
    const datum_range_t &sindex_range,
    const rdb_protocol_t::region_t &sindex_region,
-#if !SLICE_ALT
-    transaction_t *txn,
-#endif
    superblock_t *superblock,
    ql::env_t *ql_env,
    const ql::batchspec_t &batchspec,
void rdb_rget_secondary_slice(
    sindex_multi_bool_t sindex_multi,
    rget_read_response_t *response) {
    profile::starter_t starter("Do range scan on secondary index.", ql_env->trace);
-#if SLICE_ALT
    rdb_rget_depth_first_traversal_callback_t callback(
        ql_env, batchspec, transform, terminal, sindex_region.inner, pk_range,
        sorting, sindex_func, sindex_multi, sindex_range, response, slice);
    btree_concurrent_traversal(
        slice, superblock, sindex_region.inner, &callback,
        (!reversed(sorting) ? FORWARD : BACKWARD));
-#else
-    rdb_rget_depth_first_traversal_callback_t callback(
-        txn, ql_env, batchspec, transform, terminal, sindex_region.inner, pk_range,
-        sorting, sindex_func, sindex_multi, sindex_range, response, slice);
-    btree_concurrent_traversal(
-        slice, txn, superblock, sindex_region.inner, &callback,
-        (!reversed(sorting) ? FORWARD : BACKWARD));
-#endif

    response->truncated = callback.batcher.should_send_batch();

    boost::apply_visitor(result_finalizer_visitor_t(), response->result);
}

-#if SLICE_ALT
void rdb_distribution_get(btree_slice_t *slice, int max_depth,
                          const store_key_t &left_key,
                          superblock_t *superblock,
                          distribution_read_response_t *response) {
-#else
-void rdb_distribution_get(btree_slice_t *slice, int max_depth,
-                          const store_key_t &left_key,
-                          transaction_t *txn, superblock_t *superblock,
-                          distribution_read_response_t *response) {
-#endif
    int64_t key_count_out;
    std::vector<store_key_t> key_splits;
-#if SLICE_ALT
    get_btree_key_distribution(slice, superblock, max_depth,
                               &key_count_out, &key_splits);
-#else
-    get_btree_key_distribution(slice, txn, superblock, max_depth, &key_count_out, &key_splits);
-#endif

    int64_t keys_per_bucket;
    if (key_splits.size() == 0) {
RDB_IMPL_ME_SERIALIZABLE_1(rdb_erase_range_report_t, range_to_erase);

rdb_modification_report_cb_t::rdb_modification_report_cb_t(
    btree_store_t<rdb_protocol_t> *store,
-#if !SLICE_ALT
-    write_token_pair_t *token_pair,
-#endif
-#if SLICE_ALT
    alt_buf_lock_t *sindex_block,
-#else
-    transaction_t *txn,
-    block_id_t sindex_block_id,
-#endif
    auto_drainer_t::lock_t lock)
-    : lock_(lock), store_(store)
-#if SLICE_ALT
-    , sindex_block_(sindex_block)
-#else
-    , token_pair_(token_pair)
-    , txn_(txn)
-    , sindex_block_id_(sindex_block_id)
-#endif
-{
-#if SLICE_ALT
+    : lock_(lock), store_(store),
+      sindex_block_(sindex_block) {
    store_->acquire_post_constructed_sindex_superblocks_for_write(
        sindex_block_, &sindexes_);
-#endif
}

-rdb_modification_report_cb_t::~rdb_modification_report_cb_t() {
-#if !SLICE_ALT
-    if (token_pair_->sindex_write_token.has()) {
-        token_pair_->sindex_write_token.reset();
-    }
-#endif
-}
+rdb_modification_report_cb_t::~rdb_modification_report_cb_t() { }

void rdb_modification_report_cb_t::on_mod_report(
    const rdb_modification_report_t &mod_report) {
-#if !SLICE_ALT
-    if (!sindex_block_.has()) {
-        // Don't allow interruption here, or we may end up with inconsistent data
-        cond_t dummy_interruptor;
-        store_->acquire_sindex_block_for_write(
-            token_pair_, txn_, &sindex_block_,
-            sindex_block_id_, &dummy_interruptor);
-
-        store_->acquire_post_constructed_sindex_superblocks_for_write(
-            sindex_block_.get(), txn_, &sindexes_);
-    }
-#endif
-
    mutex_t::acq_t acq;
-#if SLICE_ALT
    store_->lock_sindex_queue(sindex_block_, &acq);
-#else
-    store_->lock_sindex_queue(sindex_block_.get(), &acq);
-#endif

    write_message_t wm;
    wm << rdb_sindex_change_t(mod_report);
    store_->sindex_queue_push(wm, &acq);

-#if SLICE_ALT
    rdb_update_sindexes(sindexes_, &mod_report, sindex_block_->txn());
-#else
-    rdb_update_sindexes(sindexes_, &mod_report, txn_);
-#endif
}

typedef btree_store_t<rdb_protocol_t>::sindex_access_vector_t sindex_access_vector_t;
void compute_keys(const store_key_t &primary_key, counted_t<const ql::datum_t> d
void rdb_update_single_sindex(
    const btree_store_t<rdb_protocol_t>::sindex_access_t *sindex,
    const rdb_modification_report_t *modification,
-#if !SLICE_ALT
-    transaction_t *txn,
-#endif
    auto_drainer_t::lock_t) {
    // Note if you get this error it's likely that you've passed in a default
    // constructed mod_report. Don't do that. Mod reports should always be passed
void rdb_update_single_sindex(
    {
        keyvalue_location_t<rdb_value_t> kv_location;

-#if SLICE_ALT
        find_keyvalue_location_for_write(super_block,
                                         it->btree_key(),
                                         &kv_location,
                                         &sindex->btree->stats,
                                         env.trace.get_or_null(),
                                         &return_superblock_local);
-#else
-        find_keyvalue_location_for_write(txn, super_block,
-                                         it->btree_key(),
-                                         &kv_location,
-                                         &sindex->btree->root_eviction_priority,
-                                         &sindex->btree->stats,
-                                         env.trace.get_or_null(),
-                                         &return_superblock_local);
-#endif

        if (kv_location.value.has()) {
-#if SLICE_ALT
            kv_location_delete(&kv_location, *it,
                               repli_timestamp_t::distant_past, NULL);
-#else
-            kv_location_delete(&kv_location, *it,
-                               sindex->btree, repli_timestamp_t::distant_past, txn, NULL);
-#endif
        }
        // The keyvalue location gets destroyed here.
    }
void rdb_update_single_sindex(
    {
        keyvalue_location_t<rdb_value_t> kv_location;

-#if SLICE_ALT
        find_keyvalue_location_for_write(super_block,
                                         it->btree_key(),
                                         &kv_location,
                                         &sindex->btree->stats,
                                         env.trace.get_or_null(),
                                         &return_superblock_local);
-#else
-        find_keyvalue_location_for_write(txn, super_block,
-                                         it->btree_key(),
-                                         &kv_location,
-                                         &sindex->btree->root_eviction_priority,
-                                         &sindex->btree->stats,
-                                         env.trace.get_or_null(),
-                                         &return_superblock_local);
-#endif

-#if SLICE_ALT
        kv_location_set(&kv_location, *it,
                        modification->info.added.second,
                        repli_timestamp_t::distant_past);
-#else
-        kv_location_set(&kv_location, *it,
-                        modification->info.added.second, sindex->btree,
-                        repli_timestamp_t::distant_past, txn);
-#endif
        // The keyvalue location gets destroyed here.
    }
    super_block = return_superblock_local.wait();
msg:  Removed SLICE_ALT in btree.cc up to rdb_update_sindexes.
repo: rethinkdb/rethinkdb
sha:  01671c880fc5a6eee5be23c9fa867abeafbf4f10
time: 2014-01-16T03:27:42Z

diff:
--- a/html/admin/css/dashboardView.css
+++ b/html/admin/css/dashboardView.css
  margin-top: 6px;
}

+.group-close, .group-open {
+  float: right;
+  margin-top: 17px !important;
+  margin-right: 7px !important;
+}
+
.db-zoom, .db-minimize, .db-hide, .db-info {
  float: right;
  margin-top: -4px !important;
  margin-right: 4px !important;
}

-.db-zoom:hover, .db-minimize:hover, .db-hide, .db-info:hover {
+.db-zoom:hover, .db-minimize:hover, .db-hide, .db-info:hover, .group-close:hover, .group-open:hover {
  cursor: pointer;
}

+.groupHidden li {
+  display: none;
+}
+
.statGroups {
  margin-left: 0px;
  float: left;
--- a/html/admin/js/lib/nv.d3.js
+++ b/html/admin/js/lib/nv.d3.js
nv.models.axis = function() {
              .attr('transform', function(d, i, j) { return 'rotate(' + rotateLabels + ' 0,0)' })
              .attr('text-anchor', rotateLabels % 360 > 0 ? 'start' : 'end');
          }
-          axisLabel.enter().append('text').attr('class', 'nv-axislabel')
+          axisLabel.enter().append('text').attr('class', 'nv-axislabel nv-x-axislabel')
              .attr('text-anchor', 'middle')
+              .attr('class', 'heikotestclass')
              .attr('y', xLabelMargin);
          var w = (scale.range().length == 2) ? scale.range()[1] : (scale.range()[scale.range().length - 1] + (scale.range()[1] - scale.range()[0]));
          axisLabel
--- a/html/admin/js/views/dashboardView.js
+++ b/html/admin/js/views/dashboardView.js
var dashboardView = Backbone.View.extend({
  updateInterval: 1000, // 1 second, constant
  updateFrequency: 5, // the actual update rate (5s)
  updateCounter: 0,
-  arraySize: 99, // how many values will we keep per figure?
+  arraySize: 20, // how many values will we keep per figure?
  seriesData: {},
  charts: {},
  units: [],
var dashboardView = Backbone.View.extend({

  events: {
    "click .dashboard-dropdown li": "checkEnabled",
-    "click .interval-dropdown li": "checkInterval",
-    "click .db-zoom": "renderDetailChart",
-    "click .db-minimize": "checkDetailChart",
-    "click .db-hide": "hideChart"
+    "click .interval-dropdown li": "checkInterval",
+    "click .db-zoom": "renderDetailChart",
+    "click .db-minimize": "checkDetailChart",
+    "click .db-hide": "hideChart",
+    "click .group-close": "hideGroup",
+    "click .group-open": "showGroup"
  },

  template: new EJS({url: 'js/templates/dashboardView.ejs'}),
var dashboardView = Backbone.View.extend({

    var counter = 1;
    $.each(this.options.description.models[0].attributes.groups, function () {
-      console.log(self.options.description.models[0].attributes.groups.length);
+      console.log(this);
      $('.thumbnails').append(
        '<ul class="statGroups" id="' + this.group + '">' +
+        '<i class="group-close icon-minus icon-white"></i>' +
        '<h4 class="statsHeader">' + this.name + '</h4>' +
        '</ul>');
      $('#menuGroups').append('<li class="nav-header">' + this.name + '</li>');
var dashboardView = Backbone.View.extend({
    }
  },

+  hideGroup: function (a) {
+    var group = $(a.target).parent();
+    $(a.target).removeClass('icon-minus group-close');
+    $(a.target).addClass('icon-plus group-open');
+    $(group).addClass("groupHidden");
+  },
+
+  showGroup: function (a) {
+    var group = $(a.target).parent();
+    $(a.target).removeClass('icon-plus group-open');
+    $(a.target).addClass('icon-minus group-close');
+    $(group).removeClass("groupHidden");
+  },
+
  hideChart: function (a) {
    var figure = $(a.target).attr("value");
    $('#' + figure + 'Checkbox').prop('checked', false);
CSS + closeable groups
arangodb/arangodb
fb1dfcfdd891806fc762d2ed2300933d07359aa3
2013-05-22T11:46:24Z
mmm a / dbms / programs / client / Client . cpp <nl> ppp b / dbms / programs / client / Client . cpp <nl> class Client : public Poco : : Util : : Application <nl> if ( ! parsed_query ) <nl> return true ; <nl> <nl> - / / / Replace ASTQueryParameter with ASTLiteral for prepared statements . <nl> - ReplaceQueryParameterVisitor visitor ( query_parameters ) ; <nl> - visitor . visit ( parsed_query ) ; <nl> - <nl> - / / / Get new query after substitutions . <nl> - query = serializeAST ( * parsed_query ) ; <nl> - <nl> processed_rows = 0 ; <nl> progress . reset ( ) ; <nl> show_progress_bar = false ; <nl> class Client : public Poco : : Util : : Application <nl> / / / Process the query that doesn ' t require transferring data blocks to the server . <nl> void processOrdinaryQuery ( ) <nl> { <nl> + / / / Replace ASTQueryParameter with ASTLiteral for prepared statements . <nl> + ReplaceQueryParameterVisitor visitor ( query_parameters ) ; <nl> + visitor . visit ( parsed_query ) ; <nl> + <nl> + / / / Get new query after substitutions . Note that it cannot be done for INSERT query with embedded data . <nl> + query = serializeAST ( * parsed_query ) ; <nl> + <nl> connection - > sendQuery ( query , query_id , QueryProcessingStage : : Complete , & context . getSettingsRef ( ) , nullptr , true ) ; <nl> sendExternalTables ( ) ; <nl> receiveResult ( ) ; <nl> mmm a / dbms / src / Interpreters / executeQuery . cpp <nl> ppp b / dbms / src / Interpreters / executeQuery . cpp <nl> static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl> / / / TODO Parser should fail early when max_query_size limit is reached . <nl> ast = parseQuery ( parser , begin , end , " " , max_query_size ) ; <nl> <nl> - / / / Replace ASTQueryParameter with ASTLiteral for prepared statements . <nl> - ReplaceQueryParameterVisitor visitor ( context . getQueryParameters ( ) ) ; <nl> - visitor . visit ( ast ) ; <nl> - <nl> auto * insert_query = ast - > as < ASTInsertQuery > ( ) ; <nl> <nl> if ( insert_query & & insert_query - > settings_ast ) <nl> static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl> insert_query - > has_tail = has_query_tail ; <nl> } <nl> else <nl> + { <nl> query_end = end ; <nl> + } <nl> } <nl> catch ( . . . ) <nl> { <nl> static std : : tuple < ASTPtr , BlockIO > executeQueryImpl ( <nl> <nl> try <nl> { <nl> + / / / Replace ASTQueryParameter with ASTLiteral for prepared statements . <nl> + ReplaceQueryParameterVisitor visitor ( context . getQueryParameters ( ) ) ; <nl> + visitor . visit ( ast ) ; <nl> + <nl> / / / Get new query after substitutions . <nl> if ( context . hasQueryParameters ( ) ) <nl> query = serializeAST ( * ast ) ; <nl> mmm a / dbms / tests / queries / 0_stateless / 00954_client_prepared_statements . reference <nl> ppp b / dbms / tests / queries / 0_stateless / 00954_client_prepared_statements . reference <nl> <nl> 1 Hello , world 2005 - 05 - 05 05 : 05 : 05 <nl> 2 test 2005 - 05 - 25 15 : 00 : 00 <nl> 2 test 2005 - 05 - 25 15 : 00 : 00 <nl> + Code : 36 . DB : : Exception : Substitution ` s ` is not set <nl> mmm a / dbms / tests / queries / 0_stateless / 00955_complex_prepared_statements . sh <nl> ppp b / dbms / tests / queries / 0_stateless / 00955_complex_prepared_statements . sh <nl> <nl> CURDIR = $ ( cd " $ ( dirname " $ { BASH_SOURCE [ 0 ] } " ) " & & pwd ) <nl> . $ CURDIR / . . / shell_config . sh <nl> <nl> - EXCEPTION_TEXT = " Code : 36 . DB : : Exception : Expected correct value in parameter with name ' injection ' " <nl> + EXCEPTION_TEXT = " Code : 36 . 
" <nl> EXCEPTION_SUCCESS_TEXT = " OK " <nl> EXCEPTION_FAIL_TEXT = " FAIL " <nl> <nl>
Fixed tests
ClickHouse/ClickHouse
b079631f610159e0c9ca7f289e7a5dc99319453e
2019-06-15T18:22:48Z
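Editor's note: the ClickHouse commit above moves ReplaceQueryParameterVisitor out of the generic query path so that an INSERT with embedded data is never re-serialized through the AST. As a minimal sketch of the underlying idea (walking an AST and swapping parameter placeholders for literals), assuming hypothetical Node/kind types rather than ClickHouse's real IAST hierarchy:

#include <map>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical, simplified AST node; not ClickHouse's real classes.
struct Node {
    std::string kind;                          // "literal", "param", "select", ...
    std::string value;                         // literal text or parameter name
    std::vector<std::shared_ptr<Node>> children;
};

// Replace every "param" node with a "literal" node, reporting unset
// parameters the way the test output above expects.
void replaceQueryParameters(std::shared_ptr<Node>& node,
                            const std::map<std::string, std::string>& params)
{
    if (node->kind == "param") {
        auto it = params.find(node->value);
        if (it == params.end())
            throw std::runtime_error("Substitution `" + node->value + "` is not set");
        node = std::make_shared<Node>(Node{"literal", it->second, {}});
        return;
    }
    for (auto& child : node->children)
        replaceQueryParameters(child, params);
}

Running the substitution only after the INSERT body has been split off means the embedded data tail never has to round-trip through serializeAST.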
mmm a / test / jit / test_tracer . py <nl> ppp b / test / jit / test_tracer . py <nl> def tensor_size ( x : torch . Tensor ) - > torch . Tensor : <nl> torch . tensor ( [ 15 ] ) <nl> ) <nl> <nl> + @ torch . jit . script <nl> + def use_device ( x ) : <nl> + return torch . zeros_like ( x , device = x . device ) <nl> + <nl> + def foo ( x ) : <nl> + return use_device ( x ) <nl> + <nl> + traced_tensor_size = torch . jit . trace ( foo , torch . rand ( 7 , ) ) <nl> + self . run_pass ( ' inline ' , traced_tensor_size . graph ) <nl> + FileCheck ( ) . check ( " prim : : device " ) . run ( traced_tensor_size . graph ) <nl> + <nl> @ unittest . skipIf ( IS_WINDOWS , " temp file name on windows " ) <nl> def test_trace_save ( self ) : <nl> def fn ( x ) : <nl> mmm a / torch / csrc / jit / passes / peephole . cpp <nl> ppp b / torch / csrc / jit / passes / peephole . cpp <nl> struct PeepholeOptimizeImpl { <nl> } <nl> } <nl> } <nl> - } else if ( node - > matches ( <nl> - " aten : : is_floating_point ( Tensor self ) - > bool " ) ) { <nl> + } else if ( <nl> + node - > matches ( " aten : : is_floating_point ( Tensor self ) - > bool " ) & & <nl> + shape_peepholes_ ) { <nl> auto ptt = node - > inputs ( ) . at ( 0 ) - > type ( ) - > cast < TensorType > ( ) ; <nl> if ( auto maybe_dtype = ptt - > scalarType ( ) ) { <nl> c10 : : ScalarType dtype = * maybe_dtype ; <nl> struct PeepholeOptimizeImpl { <nl> " as input type subtypes output type " ) ; <nl> node - > output ( ) - > replaceAllUsesWith ( node - > input ( ) ) ; <nl> } <nl> - } else if ( node - > matches ( " prim : : dtype ( Tensor a ) - > int " ) ) { <nl> + } else if ( <nl> + node - > matches ( " prim : : dtype ( Tensor a ) - > int " ) & & shape_peepholes_ ) { <nl> auto ptt = node - > input ( ) - > type ( ) - > expect < TensorType > ( ) ; <nl> if ( ptt - > scalarType ( ) ) { <nl> WithInsertPoint guard ( node ) ; <nl> struct PeepholeOptimizeImpl { <nl> output - > debugName ( ) ) ; <nl> node - > output ( ) - > replaceAllUsesWith ( output ) ; <nl> } <nl> - } else if ( node - > matches ( " prim : : device ( Tensor a ) - > Device " ) ) { <nl> + } else if ( <nl> + node - > matches ( " prim : : device ( Tensor a ) - > Device " ) & & <nl> + shape_peepholes_ ) { <nl> auto ptt = node - > input ( ) - > type ( ) - > expect < TensorType > ( ) ; <nl> if ( ptt - > device ( ) ) { <nl> WithInsertPoint guard ( node ) ; <nl> struct PeepholeOptimizeImpl { <nl> output - > debugName ( ) ) ; <nl> node - > output ( ) - > replaceAllUsesWith ( output ) ; <nl> } <nl> - } else if ( node - > matches ( " prim : : is_cuda ( Tensor a ) - > bool " ) ) { <nl> + } else if ( <nl> + node - > matches ( " prim : : is_cuda ( Tensor a ) - > bool " ) & & <nl> + shape_peepholes_ ) { <nl> auto ptt = node - > input ( ) - > type ( ) - > expect < TensorType > ( ) ; <nl> if ( ptt - > device ( ) ) { <nl> WithInsertPoint guard ( node ) ; <nl> mmm a / torch / onnx / utils . py <nl> ppp b / torch / onnx / utils . py <nl> def _run_symbolic_function ( g , n , inputs , env , operator_export_type = OperatorExpor <nl> # Let the exporter handle and finally eliminate these ops <nl> # ListConstruct and ListUnpack will be erased in the ONNX peephole pass <nl> return None <nl> + elif op_name = = " device " and n . output ( ) . type ( ) . kind ( ) = = " DeviceObjType " : <nl> + return None <nl> elif op_name = = ' Loop ' or op_name = = ' If ' : <nl> new_op_outputs = g . op ( op_name , * inputs , outputs = n . outputsSize ( ) ) <nl> new_node = new_op_outputs [ 0 ] . node ( ) if n . outputsSize ( ) > 1 else new_op_outputs . 
node ( ) <nl>
[ JIT ] don ' t optimize device dtype on inline ( )
pytorch/pytorch
1f0dcf39fc616cd0ab67ae1e464637824aadf366
2020-09-12T00:29:54Z
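Editor's note: the PyTorch commit above gates the tensor-property peepholes (is_floating_point, dtype, device, is_cuda) behind a shape_peepholes_ flag, so tracing-time metadata is not folded into the graph during inlining. A minimal sketch of the gating pattern, with hypothetical stand-in types instead of torch::jit::Node and TensorType:

#include <optional>

// Hypothetical stand-ins for JIT graph types.
struct TensorInfo { std::optional<int> device_index; };
struct Node { TensorInfo input_info; bool replaced = false; };

struct PeepholeImpl {
    bool shape_peepholes_;  // when false, leave prim::device & friends alone

    void run(Node& node) {
        // Fold prim::device into a constant only when the pass is allowed
        // to rely on profiled shape/device metadata AND the device is known.
        if (shape_peepholes_ && node.input_info.device_index.has_value()) {
            node.replaced = true;  // stand-in for replaceAllUsesWith(constant)
        }
    }
};

The test added above checks exactly this: after inlining a traced call, prim::device must still appear in the graph instead of a baked-in constant.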
mmm a / tensorflow / compiler / mlir / tensorflow / tests / mark_ops_for_outside_compilation . mlir <nl> ppp b / tensorflow / compiler / mlir / tensorflow / tests / mark_ops_for_outside_compilation . mlir <nl> <nl> / / RUN : tf - opt % s - tf - mark - ops - for - outside - compilation | FILECHECK_OPTS = " " FileCheck % s <nl> <nl> - / / CHECK - LABEL : func @ unsupported_op_no_soft_placement <nl> - func @ unsupported_op_no_soft_placement ( ) - > tensor < i32 > { <nl> + / / CHECK - LABEL : func @ unsupported_op_missing_soft_placement_attribute <nl> + func @ unsupported_op_missing_soft_placement_attribute ( ) - > tensor < i32 > { <nl> % 0 = " tf_device . cluster " ( ) ( { <nl> / / CHECK : " tf . UnsupportedOp " <nl> / / CHECK - NOT : _xla_outside_compilation <nl> func @ unsupported_op_soft_placement_false ( ) - > tensor < i32 > { <nl> return % 0 : tensor < i32 > <nl> } <nl> <nl> + / / CHECK - LABEL : func @ assert_op_string_operand <nl> + func @ assert_op_string_operand ( % arg0 : tensor < ! tf . string > ) - > tensor < i32 > { <nl> + % 0 = " tf_device . cluster " ( ) ( { <nl> + / / CHECK : " tf . Assert " <nl> + / / CHECK - NOT : _xla_outside_compilation <nl> + / / CHECK : " tf . UnsupportedOp " <nl> + / / CHECK - SAME : _xla_outside_compilation <nl> + / / CHECK : " tf . Identity " <nl> + / / CHECK - NOT : _xla_outside_compilation <nl> + % t = constant dense < true > : tensor < i1 > <nl> + " tf . Assert " ( % t , % arg0 ) { summarize = 3 } : ( tensor < i1 > , tensor < ! tf . string > ) - > ( ) <nl> + % 1 = " tf . UnsupportedOp " ( ) { value = dense < 1 > : tensor < i32 > } : ( ) - > tensor < i32 > <nl> + % 2 = " tf . Identity " ( % 1 ) : ( tensor < i32 > ) - > tensor < i32 > <nl> + tf_device . return % 2 : tensor < i32 > <nl> + } ) { allow_soft_placement = true , num_cores_per_replica = 1 , topology = " " , device_assignment = [ ] } : ( ) - > tensor < i32 > <nl> + return % 0 : tensor < i32 > <nl> + } <nl> + <nl> / / CHECK - LABEL : func @ unsupported_op <nl> func @ unsupported_op ( ) - > tensor < i32 > { <nl> % 0 = " tf_device . cluster " ( ) ( { <nl> mmm a / tensorflow / compiler / mlir / tensorflow / transforms / mark_ops_for_outside_compilation . cc <nl> ppp b / tensorflow / compiler / mlir / tensorflow / transforms / mark_ops_for_outside_compilation . cc <nl> bool IsSupportedOp ( Operation & op , <nl> const Dialect * tf_dialect ) { <nl> if ( op . getDialect ( ) ! = tf_dialect ) <nl> return true ; <nl> - else <nl> - return ! HasStringOperand ( op ) & & ! HasStringResult ( op ) & & <nl> - ( MatchesPattern ( op , supported_ops ) | | <nl> - mhlo : : IsOpAllowedTf2XlaFallback ( & op ) ) ; <nl> + / / Assert has a legalization that later removes it so we don ' t want to outside <nl> + / / compile it ever for performance reasons . <nl> + if ( llvm : : isa < TF : : AssertOp > ( op ) ) return true ; <nl> + return ! HasStringOperand ( op ) & & ! HasStringResult ( op ) & & <nl> + ( MatchesPattern ( op , supported_ops ) | | <nl> + mhlo : : IsOpAllowedTf2XlaFallback ( & op ) ) ; <nl> } <nl> <nl> / / Checks all regions of ` op ` for captured string operands . <nl>
Don ' t outside compile tf . Assert Op even if it contains string operands .
tensorflow/tensorflow
f4307fa6f576763591afec4026b1125051b0916a
2020-11-10T20:12:59Z
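Editor's note: the TensorFlow commit above special-cases tf.Assert in the supported-op predicate so it is never marked for outside compilation, because a later legalization erases the op anyway. A condensed sketch of the predicate, using a hypothetical Op descriptor in place of mlir::Operation:

#include <string>

// Hypothetical, simplified op descriptor; the real pass inspects mlir::Operation.
struct Op {
    std::string dialect;       // "tf", "mhlo", ...
    std::string name;          // "Assert", "UnsupportedOp", ...
    bool has_string_operand;
    bool has_string_result;
    bool matches_supported_pattern;
};

bool IsSupportedOp(const Op& op) {
    if (op.dialect != "tf")
        return true;   // non-TF dialects are never outside compiled here
    // tf.Assert has a legalization that later removes it, so treat it as
    // supported even though it may carry string operands.
    if (op.name == "Assert")
        return true;
    return !op.has_string_operand && !op.has_string_result &&
           op.matches_supported_pattern;
}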
mmm a / atom / browser / api / atom_api_notification . cc <nl> ppp b / atom / browser / api / atom_api_notification . cc <nl> <nl> # include " atom / common / native_mate_converters / gfx_converter . h " <nl> # include " atom / common / native_mate_converters / image_converter . h " <nl> # include " atom / common / native_mate_converters / string16_converter . h " <nl> + # include " base / guid . h " <nl> # include " base / strings / utf_string_conversions . h " <nl> # include " brightray / browser / browser_client . h " <nl> # include " native_mate / constructor . h " <nl> void Notification : : Close ( ) { <nl> void Notification : : Show ( ) { <nl> Close ( ) ; <nl> if ( presenter_ ) { <nl> - notification_ = presenter_ - > CreateNotification ( this ) ; <nl> + notification_ = presenter_ - > CreateNotification ( this , base : : GenerateGUID ( ) ) ; <nl> if ( notification_ ) { <nl> brightray : : NotificationOptions options ; <nl> options . title = title_ ; <nl> mmm a / brightray / browser / notification . h <nl> ppp b / brightray / browser / notification . h <nl> class Notification { <nl> } <nl> <nl> void set_delegate ( NotificationDelegate * delegate ) { delegate_ = delegate ; } <nl> + void set_notification_id ( const std : : string & id ) { notification_id_ = id ; } <nl> + <nl> NotificationDelegate * delegate ( ) const { return delegate_ ; } <nl> NotificationPresenter * presenter ( ) const { return presenter_ ; } <nl> + const std : : string & notification_id ( ) const { return notification_id_ ; } <nl> <nl> protected : <nl> Notification ( NotificationDelegate * delegate , <nl> class Notification { <nl> private : <nl> NotificationDelegate * delegate_ ; <nl> NotificationPresenter * presenter_ ; <nl> + std : : string notification_id_ ; <nl> <nl> base : : WeakPtrFactory < Notification > weak_factory_ ; <nl> <nl> mmm a / brightray / browser / notification_presenter . cc <nl> ppp b / brightray / browser / notification_presenter . cc <nl> NotificationPresenter : : ~ NotificationPresenter ( ) { <nl> } <nl> <nl> base : : WeakPtr < Notification > NotificationPresenter : : CreateNotification ( <nl> - NotificationDelegate * delegate ) { <nl> + NotificationDelegate * delegate , <nl> + const std : : string & notification_id ) { <nl> Notification * notification = CreateNotificationObject ( delegate ) ; <nl> + notification - > set_notification_id ( notification_id ) ; <nl> notifications_ . insert ( notification ) ; <nl> return notification - > GetWeakPtr ( ) ; <nl> } <nl> void NotificationPresenter : : RemoveNotification ( Notification * notification ) { <nl> delete notification ; <nl> } <nl> <nl> + void NotificationPresenter : : CloseNotificationWithId ( <nl> + const std : : string & notification_id ) { <nl> + auto it = std : : find_if ( notifications_ . begin ( ) , notifications_ . end ( ) , <nl> + [ & notification_id ] ( const Notification * n ) { <nl> + return n - > notification_id ( ) = = notification_id ; <nl> + } ) ; <nl> + if ( it ! = notifications_ . end ( ) ) <nl> + ( * it ) - > Dismiss ( ) ; <nl> + } <nl> + <nl> } / / namespace brightray <nl> mmm a / brightray / browser / notification_presenter . h <nl> ppp b / brightray / browser / notification_presenter . h <nl> <nl> # define BRIGHTRAY_BROWSER_NOTIFICATION_PRESENTER_H_ <nl> <nl> # include < set > <nl> + # include < string > <nl> <nl> # include " base / memory / weak_ptr . 
h " <nl> <nl> class NotificationPresenter { <nl> virtual ~ NotificationPresenter ( ) ; <nl> <nl> base : : WeakPtr < Notification > CreateNotification ( <nl> - NotificationDelegate * delegate ) ; <nl> + NotificationDelegate * delegate , <nl> + const std : : string & notification_id ) ; <nl> + void CloseNotificationWithId ( const std : : string & notification_id ) ; <nl> <nl> std : : set < Notification * > notifications ( ) const { return notifications_ ; } <nl> <nl> mmm a / brightray / browser / platform_notification_service . cc <nl> ppp b / brightray / browser / platform_notification_service . cc <nl> namespace brightray { <nl> <nl> namespace { <nl> <nl> - void RemoveNotification ( base : : WeakPtr < Notification > notification ) { <nl> - if ( notification ) <nl> - notification - > Dismiss ( ) ; <nl> - } <nl> - <nl> void OnWebNotificationAllowed ( base : : WeakPtr < Notification > notification , <nl> const SkBitmap & icon , <nl> const content : : PlatformNotificationData & data , <nl> void PlatformNotificationService : : DisplayNotification ( <nl> const std : : string & notification_id , <nl> const GURL & origin , <nl> const content : : PlatformNotificationData & notification_data , <nl> - const content : : NotificationResources & notification_resources , <nl> - base : : Closure * cancel_callback ) { <nl> + const content : : NotificationResources & notification_resources ) { <nl> auto * presenter = browser_client_ - > GetNotificationPresenter ( ) ; <nl> if ( ! presenter ) <nl> return ; <nl> NotificationDelegateImpl * delegate = <nl> new NotificationDelegateImpl ( notification_id ) ; <nl> - auto notification = presenter - > CreateNotification ( delegate ) ; <nl> + auto notification = presenter - > CreateNotification ( delegate , notification_id ) ; <nl> if ( notification ) { <nl> - * cancel_callback = base : : Bind ( & RemoveNotification , notification ) ; <nl> browser_client_ - > WebNotificationAllowed ( <nl> render_process_id_ , base : : Bind ( & OnWebNotificationAllowed , notification , <nl> notification_resources . notification_icon , <nl> void PlatformNotificationService : : ClosePersistentNotification ( <nl> content : : BrowserContext * browser_context , <nl> const std : : string & notification_id ) { } <nl> <nl> + void PlatformNotificationService : : CloseNotification ( <nl> + content : : BrowserContext * browser_context , <nl> + const std : : string & notification_id ) { <nl> + auto presenter = browser_client_ - > GetNotificationPresenter ( ) ; <nl> + if ( ! presenter ) <nl> + return ; <nl> + presenter - > CloseNotificationWithId ( notification_id ) ; <nl> + } <nl> + <nl> void PlatformNotificationService : : GetDisplayedNotifications ( <nl> content : : BrowserContext * browser_context , <nl> const DisplayedNotificationsCallback & callback ) { } <nl> mmm a / brightray / browser / platform_notification_service . h <nl> ppp b / brightray / browser / platform_notification_service . 
h <nl> class PlatformNotificationService <nl> const std : : string & notification_id , <nl> const GURL & origin , <nl> const content : : PlatformNotificationData & notification_data , <nl> - const content : : NotificationResources & notification_resources , <nl> - base : : Closure * cancel_callback ) override ; <nl> + const content : : NotificationResources & notification_resources ) override ; <nl> void DisplayPersistentNotification ( <nl> content : : BrowserContext * browser_context , <nl> const std : : string & notification_id , <nl> class PlatformNotificationService <nl> const content : : NotificationResources & notification_resources ) override ; <nl> void ClosePersistentNotification ( content : : BrowserContext * browser_context , <nl> const std : : string & notification_id ) override ; <nl> + void CloseNotification ( content : : BrowserContext * browser_context , <nl> + const std : : string & notification_id ) override ; <nl> void GetDisplayedNotifications ( <nl> content : : BrowserContext * browser_context , <nl> const DisplayedNotificationsCallback & callback ) override ; <nl>
REVIEW : Remove the notification close - closure in favour of an explicit method
electron/electron
8e125b2953eba9eda94495b2d740e5c52c127c51
2018-06-19T01:49:40Z
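Editor's note: the Electron commit above replaces a stored cancel closure with an explicit close-by-id path; each notification is created with a GUID and the presenter can later dismiss it by that id. A minimal sketch of the lookup (ownership and delete handling elided for brevity; the real classes live in brightray):

#include <algorithm>
#include <set>
#include <string>

class Notification {
 public:
    explicit Notification(std::string id) : id_(std::move(id)) {}
    const std::string& notification_id() const { return id_; }
    void Dismiss() { dismissed_ = true; }
 private:
    std::string id_;
    bool dismissed_ = false;
};

class Presenter {
 public:
    Notification* Create(const std::string& id) {
        auto* n = new Notification(id);   // owned by the presenter in the real code
        notifications_.insert(n);
        return n;
    }
    // Explicit close-by-id, replacing the old per-notification cancel closure.
    void CloseNotificationWithId(const std::string& id) {
        auto it = std::find_if(notifications_.begin(), notifications_.end(),
                               [&id](const Notification* n) {
                                   return n->notification_id() == id;
                               });
        if (it != notifications_.end())
            (*it)->Dismiss();
    }
 private:
    std::set<Notification*> notifications_;
};

Keying by id rather than holding a closure lets the content layer close a notification without keeping a live callback into the presenter.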
mmm a / test / cpp / jit / test_alias_analysis . h <nl> ppp b / test / cpp / jit / test_alias_analysis . h <nl> void testAliasRegistration ( ) { <nl> } <nl> { <nl> auto registry = torch : : RegisterOperators ( ) . op ( <nl> - " aten : : rand6 ( Tensor arg1 ) - > Tensor " , <nl> + " foo : : rand6 ( Tensor arg1 ) - > Tensor " , <nl> torch : : RegisterOperators : : options ( ) <nl> . catchAllKernel ( [ ] ( at : : Tensor ) - > at : : Tensor { <nl> return at : : rand ( { 2 , 2 } ) ; <nl> } ) <nl> . aliasAnalysis ( AliasAnalysisKind : : FROM_SCHEMA ) ) ; <nl> - const auto rand_op = Symbol : : fromQualString ( " aten : : rand6 " ) ; <nl> + const auto rand_op = Symbol : : fromQualString ( " foo : : rand6 " ) ; <nl> auto graph = std : : make_shared < Graph > ( ) ; <nl> auto a = graph - > addInput ( ) ; <nl> auto b = graph - > insert ( rand_op , { a } ) ; <nl> void testAliasRegistration ( ) { <nl> } <nl> { <nl> auto registry = torch : : RegisterOperators ( ) . op ( <nl> - " aten : : rand7 ( Tensor ( a ) arg1 ) - > Tensor ( a ) " , <nl> + " foo : : rand7 ( Tensor ( a ) arg1 ) - > Tensor ( a ) " , <nl> torch : : RegisterOperators : : options ( ) <nl> . catchAllKernel ( [ ] ( at : : Tensor t ) - > at : : Tensor { return t * 2 ; } ) <nl> . aliasAnalysis ( AliasAnalysisKind : : FROM_SCHEMA ) ) ; <nl> - const auto rand_op = Symbol : : fromQualString ( " aten : : rand7 " ) ; <nl> + const auto rand_op = Symbol : : fromQualString ( " foo : : rand " ) ; <nl> + <nl> + <nl> + <nl> + <nl> auto graph = std : : make_shared < Graph > ( ) ; <nl> auto a = graph - > addInput ( ) ; <nl> auto b = graph - > insert ( rand_op , { a } ) ; <nl> void testAliasRegistration ( ) { <nl> } <nl> { <nl> auto registry = torch : : RegisterOperators ( ) . op ( <nl> - " aten : : rand8 ( Tensor ( a ) arg1 ) - > Tensor ( b ) " , <nl> + " foo : : rand8 ( Tensor ( a ) arg1 ) - > Tensor ( b ) " , <nl> torch : : RegisterOperators : : options ( ) <nl> . catchAllKernel ( [ ] ( at : : Tensor t ) - > at : : Tensor { return t * 2 ; } ) <nl> . aliasAnalysis ( AliasAnalysisKind : : FROM_SCHEMA ) ) ; <nl> - const auto rand_op = Symbol : : fromQualString ( " aten : : rand8 " ) ; <nl> + const auto rand_op = Symbol : : fromQualString ( " foo : : rand8 " ) ; <nl> auto graph = std : : make_shared < Graph > ( ) ; <nl> auto a = graph - > addInput ( ) ; <nl> auto b = graph - > insert ( rand_op , { a } ) ; <nl> mmm a / torch / csrc / jit / ir . cpp <nl> ppp b / torch / csrc / jit / ir . cpp <nl> bool Node : : hasSideEffects ( ) const { <nl> " doesn ' t have one either . We don ' t know if this op has side effects . " ) ; <nl> return false ; <nl> } <nl> + <nl> if ( kind_ . is_prim ( ) | | kind_ . is_aten ( ) ) { <nl> - / / TODO This assert is only introduced to check that we don ' t break the <nl> - / / current code base . Remove this later to allow other ops to use <nl> - / / AliasAnalysisKind : : FROM_SCHEMA <nl> + / / TODO There is nothing in the system that relies on aten : : and prim : : <nl> + / / ops using AliasAnalysisKind : : FROM_SCHEMA or AliasAnalysisKind : : INTERNAL_SPECIAL_CASE , <nl> + / / but this is the intended behavior for all current ops and a good error check . <nl> + / / We can consider lifting this constraint later if we have a use case for it . 
<nl> TORCH_INTERNAL_ASSERT ( <nl> op - > aliasAnalysisKind ( ) = = AliasAnalysisKind : : INTERNAL_SPECIAL_CASE | | <nl> op - > aliasAnalysisKind ( ) = = AliasAnalysisKind : : FROM_SCHEMA , <nl> bool Node : : hasSideEffects ( ) const { <nl> " has " , <nl> toString ( op - > aliasAnalysisKind ( ) ) ) ; <nl> } <nl> + <nl> switch ( op - > aliasAnalysisKind ( ) ) { <nl> case AliasAnalysisKind : : PURE : <nl> return false ; <nl> mmm a / torch / csrc / jit / passes / alias_analysis . cpp <nl> ppp b / torch / csrc / jit / passes / alias_analysis . cpp <nl> void AliasDb : : analyzeImpl ( Node * node ) { <nl> " Special cases should be handled already if we ' re here . " ) ; <nl> <nl> if ( node - > kind ( ) . is_aten ( ) | | node - > kind ( ) . is_prim ( ) ) { <nl> - / / TODO This assert is only introduced to check that we don ' t break the <nl> - / / current code base . Remove this later to allow aten : : and prim : : ops to <nl> - / / use other alias analysis kinds . <nl> + / / TODO There is nothing in the system that relies on aten : : and prim : : <nl> + / / ops using AliasAnalysisKind : : FROM_SCHEMA or AliasAnalysisKind : : INTERNAL_SPECIAL_CASE , <nl> + / / but this is the intended behavior for all current ops and a good error check . <nl> + / / We can consider lifting this constraint later if we have a use case for it . <nl> TORCH_INTERNAL_ASSERT ( <nl> analysis = = AliasAnalysisKind : : FROM_SCHEMA , <nl> " aten : : and prim : : operators should use AliasAnalysisKind : : FROM_SCHEMA but " , <nl> void AliasDb : : analyzeImpl ( Node * node ) { <nl> " AliasAnalysisKind : : CONSERVATIVE / PURE / INTERNAL_SPECIAL_CASE should already have been handled above " ) ; <nl> const auto & schema = node - > schema ( ) ; <nl> <nl> - / / TODO This assert is only introduced to check that we don ' t break the <nl> - / / current code base . Remove this later to allow other ops to use <nl> - / / AliasAnalysisKind : : FROM_SCHEMA <nl> - TORCH_INTERNAL_ASSERT ( <nl> - node - > kind ( ) . is_prim ( ) | | node - > kind ( ) . is_aten ( ) , <nl> - " The current code base should only have AliasAnalysisKind : : FROM_SCHEMA for aten : : and prim : : ops but we found it for " , <nl> - node - > kind ( ) . toDisplayString ( ) , <nl> - " . We want to open this up though . " ) ; <nl> - <nl> / / Bind the schema ' s " formal " alias annotation to the actual values those <nl> / / schema arguments represent <nl> std : : unordered_map < Symbol , Value * > formalToActual ; <nl>
Open up AliasAnalysisKind for any ops ( )
pytorch/pytorch
3ad9dbf9d51f7fd6bc02d9c96b247d30927c1f17
2019-08-05T20:18:12Z
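Editor's note: the PyTorch commit above lifts the restriction that only aten:: and prim:: ops may use AliasAnalysisKind::FROM_SCHEMA; custom namespaces such as foo:: are now allowed, while aten/prim still assert FROM_SCHEMA or INTERNAL_SPECIAL_CASE. A minimal sketch of the check, with a hypothetical Symbol type standing in for torch::jit::Symbol:

#include <cassert>
#include <string>

// Hypothetical namespace-qualified symbol.
struct Symbol {
    std::string ns;   // "aten", "prim", "foo", ...
    bool is_aten() const { return ns == "aten"; }
    bool is_prim() const { return ns == "prim"; }
};

enum class AliasAnalysisKind { FROM_SCHEMA, PURE, CONSERVATIVE, INTERNAL_SPECIAL_CASE };

// aten:: and prim:: ops are still required to use FROM_SCHEMA (or the
// internal special case); any other namespace may pick its own kind.
void checkAliasAnalysisKind(const Symbol& kind, AliasAnalysisKind analysis) {
    if (kind.is_aten() || kind.is_prim()) {
        assert(analysis == AliasAnalysisKind::FROM_SCHEMA ||
               analysis == AliasAnalysisKind::INTERNAL_SPECIAL_CASE);
    }
    // custom namespaces: no constraint
}

This is why the tests above rename the registered operators from aten::rand6/7/8 to foo::rand6/7/8.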
mmm a / src / wallet / rpcwallet . cpp <nl> ppp b / src / wallet / rpcwallet . cpp <nl> static UniValue getnewaddress ( const JSONRPCRequest & request ) <nl> } , <nl> } . ToString ( ) ) ; <nl> <nl> - / / Belt and suspenders check for disabled private keys <nl> - if ( pwallet - > IsWalletFlagSet ( WALLET_FLAG_DISABLE_PRIVATE_KEYS ) ) { <nl> - throw JSONRPCError ( RPC_WALLET_ERROR , " Error : Private keys are disabled for this wallet " ) ; <nl> - } <nl> - <nl> LOCK ( pwallet - > cs_wallet ) ; <nl> <nl> if ( ! pwallet - > CanGetAddresses ( ) ) { <nl> throw JSONRPCError ( RPC_WALLET_ERROR , " Error : This wallet has no available keys " ) ; <nl> } <nl> <nl> - <nl> / / Parse the label first so we don ' t generate a key if there ' s an error <nl> std : : string label ; <nl> if ( ! request . params [ 0 ] . isNull ( ) ) <nl> static UniValue getrawchangeaddress ( const JSONRPCRequest & request ) <nl> } , <nl> } . ToString ( ) ) ; <nl> <nl> - / / Belt and suspenders check for disabled private keys <nl> - if ( pwallet - > IsWalletFlagSet ( WALLET_FLAG_DISABLE_PRIVATE_KEYS ) ) { <nl> - throw JSONRPCError ( RPC_WALLET_ERROR , " Error : Private keys are disabled for this wallet " ) ; <nl> - } <nl> - <nl> LOCK ( pwallet - > cs_wallet ) ; <nl> <nl> if ( ! pwallet - > CanGetAddresses ( true ) ) { <nl> static UniValue getwalletinfo ( const JSONRPCRequest & request ) <nl> obj . pushKV ( " keypoololdest " , pwallet - > GetOldestKeyPoolTime ( ) ) ; <nl> obj . pushKV ( " keypoolsize " , ( int64_t ) kpExternalSize ) ; <nl> CKeyID seed_id = pwallet - > GetHDChain ( ) . seed_id ; <nl> - if ( ! seed_id . IsNull ( ) & & pwallet - > CanSupportFeature ( FEATURE_HD_SPLIT ) ) { <nl> + if ( pwallet - > CanSupportFeature ( FEATURE_HD_SPLIT ) ) { <nl> obj . pushKV ( " keypoolsize_hd_internal " , ( int64_t ) ( pwallet - > GetKeyPoolSize ( ) - kpExternalSize ) ) ; <nl> } <nl> if ( pwallet - > IsCrypted ( ) ) { <nl> mmm a / src / wallet / wallet . cpp <nl> ppp b / src / wallet / wallet . cpp <nl> bool CWallet : : CreateTransaction ( interfaces : : Chain : : Lock & locked_chain , const std <nl> / / post - backup change . <nl> <nl> / / Reserve a new key pair from key pool <nl> - if ( IsWalletFlagSet ( WALLET_FLAG_DISABLE_PRIVATE_KEYS ) ) { <nl> - strFailReason = _ ( " Can ' t generate a change - address key . Private keys are disabled for this wallet . " ) ; <nl> + if ( ! CanGetAddresses ( true ) ) { <nl> + strFailReason = _ ( " Can ' t generate a change - address key . No keys in the internal keypool and can ' t generate any keys . " ) ; <nl> return false ; <nl> } <nl> CPubKey vchPubKey ; <nl> bool CWallet : : ReserveKeyFromKeyPool ( int64_t & nIndex , CKeyPool & keypool , bool fRe <nl> if ( ! IsLocked ( ) ) <nl> TopUpKeyPool ( ) ; <nl> <nl> - bool fReturningInternal = IsHDEnabled ( ) & & CanSupportFeature ( FEATURE_HD_SPLIT ) & & fRequestedInternal ; <nl> + bool fReturningInternal = fRequestedInternal ; <nl> + fReturningInternal & = ( IsHDEnabled ( ) & & CanSupportFeature ( FEATURE_HD_SPLIT ) ) | | IsWalletFlagSet ( WALLET_FLAG_DISABLE_PRIVATE_KEYS ) ; <nl> bool use_split_keypool = set_pre_split_keypool . empty ( ) ; <nl> std : : set < int64_t > & setKeyPool = use_split_keypool ? ( fReturningInternal ? setInternalKeyPool : setExternalKeyPool ) : set_pre_split_keypool ; <nl> <nl> bool CWallet : : ReserveKeyFromKeyPool ( int64_t & nIndex , CKeyPool & keypool , bool fRe <nl> if ( ! batch . ReadPool ( nIndex , keypool ) ) { <nl> throw std : : runtime_error ( std : : string ( __func__ ) + " : read failed " ) ; <nl> } <nl> - if ( ! 
HaveKey ( keypool . vchPubKey . GetID ( ) ) ) { <nl> + CPubKey pk ; <nl> + if ( ! GetPubKey ( keypool . vchPubKey . GetID ( ) , pk ) ) { <nl> throw std : : runtime_error ( std : : string ( __func__ ) + " : unknown key in key pool " ) ; <nl> } <nl> / / If the key was pre - split keypool , we don ' t care about what type it is <nl> bool CWallet : : GetKeyFromPool ( CPubKey & result , bool internal ) <nl> { <nl> LOCK ( cs_wallet ) ; <nl> int64_t nIndex ; <nl> - if ( ! ReserveKeyFromKeyPool ( nIndex , keypool , internal ) ) { <nl> + if ( ! ReserveKeyFromKeyPool ( nIndex , keypool , internal ) & & ! IsWalletFlagSet ( WALLET_FLAG_DISABLE_PRIVATE_KEYS ) ) { <nl> if ( IsLocked ( ) ) return false ; <nl> WalletBatch batch ( * database ) ; <nl> result = GenerateNewKey ( batch , internal ) ; <nl> mmm a / test / functional / wallet_createwallet . py <nl> ppp b / test / functional / wallet_createwallet . py <nl> def run_test ( self ) : <nl> self . log . info ( " Test disableprivatekeys creation . " ) <nl> self . nodes [ 0 ] . createwallet ( wallet_name = ' w1 ' , disable_private_keys = True ) <nl> w1 = node . get_wallet_rpc ( ' w1 ' ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w1 . getnewaddress ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w1 . getrawchangeaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w1 . getnewaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w1 . getrawchangeaddress ) <nl> w1 . importpubkey ( w0 . getaddressinfo ( address1 ) [ ' pubkey ' ] ) <nl> <nl> self . log . info ( ' Test that private keys cannot be imported ' ) <nl> def run_test ( self ) : <nl> self . log . info ( " Test blank creation with private keys disabled . " ) <nl> self . nodes [ 0 ] . createwallet ( wallet_name = ' w2 ' , disable_private_keys = True , blank = True ) <nl> w2 = node . get_wallet_rpc ( ' w2 ' ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w2 . getnewaddress ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w2 . getrawchangeaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w2 . getnewaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w2 . getrawchangeaddress ) <nl> w2 . importpubkey ( w0 . getaddressinfo ( address1 ) [ ' pubkey ' ] ) <nl> <nl> self . log . info ( " Test blank creation with private keys enabled . " ) <nl> def run_test ( self ) : <nl> self . nodes [ 0 ] . createwallet ( wallet_name = ' w5 ' , disable_private_keys = True , blank = True ) <nl> w5 = node . get_wallet_rpc ( ' w5 ' ) <nl> assert_equal ( w5 . getwalletinfo ( ) [ ' keypoolsize ' ] , 0 ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w5 . getnewaddress ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w5 . getrawchangeaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w5 . getnewaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w5 . getrawchangeaddress ) <nl> # Encrypt the wallet <nl> w5 . encryptwallet ( ' pass ' ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w5 . 
getnewaddress ) <nl> - assert_raises_rpc_error ( - 4 , " Error : Private keys are disabled for this wallet " , w5 . getrawchangeaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w5 . getnewaddress ) <nl> + assert_raises_rpc_error ( - 4 , " Error : This wallet has no available keys " , w5 . getrawchangeaddress ) <nl> <nl> if __name__ = = ' __main__ ' : <nl> CreateWalletTest ( ) . main ( ) <nl>
Fetch keys from keypool when private keys are disabled
bitcoin/bitcoin
9b81fd19ac7ff9f34cc32cc221f057d9c3cd7218
2019-02-14T23:14:00Z
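Editor's note: the Bitcoin commit above lets wallets with disabled private keys draw from the keypool (and switches the sanity check from HaveKey to GetPubKey, so pubkey-only entries pass). A minimal sketch of the pool-selection rule, reduced to booleans; the real logic lives in CWallet::ReserveKeyFromKeyPool and consults wallet state:

// Internal keypool is usable when HD split is available, or when the
// wallet has no private keys at all (watch-only keypools).
bool ShouldUseInternalKeypool(bool requested_internal,
                              bool hd_enabled,
                              bool supports_hd_split,
                              bool private_keys_disabled)
{
    bool returning_internal = requested_internal;
    returning_internal &= (hd_enabled && supports_hd_split) || private_keys_disabled;
    return returning_internal;
}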
mmm a / tensorflow / core / kernels / matrix_triangular_solve_op . cc <nl> ppp b / tensorflow / core / kernels / matrix_triangular_solve_op . cc <nl> limitations under the License . <nl> # include " tensorflow / core / platform / macros . h " <nl> # include " tensorflow / core / platform / types . h " <nl> <nl> - # if GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> # include " tensorflow / core / platform / stream_executor . h " <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> namespace tensorflow { <nl> <nl> - # if GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> namespace { <nl> template < typename Scalar > <nl> - se : : DeviceMemory < Scalar > AsDeviceMemory ( const Scalar * cuda_memory ) { <nl> - se : : DeviceMemoryBase wrapped ( const_cast < Scalar * > ( cuda_memory ) ) ; <nl> + se : : DeviceMemory < Scalar > AsDeviceMemory ( const Scalar * gpu_memory ) { <nl> + se : : DeviceMemoryBase wrapped ( const_cast < Scalar * > ( gpu_memory ) ) ; <nl> se : : DeviceMemory < Scalar > typed ( wrapped ) ; <nl> return typed ; <nl> } <nl> } / / namespace <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> template < class Scalar > <nl> class MatrixTriangularSolveOp : public LinearAlgebraOp < Scalar > { <nl> REGISTER_LINALG_OP_CPU ( " BatchMatrixTriangularSolve " , <nl> REGISTER_LINALG_OP_CPU ( " BatchMatrixTriangularSolve " , <nl> ( MatrixTriangularSolveOp < double > ) , double ) ; <nl> <nl> - # ifdef GOOGLE_CUDA <nl> + # if GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> / / TODO ( rmlarsen ) : Re - factor to <nl> / / 1 . Enable buffer forwarding from rhs - > out . <nl> REGISTER_LINALG_OP_GPU ( " BatchMatrixTriangularSolve " , <nl> REGISTER_LINALG_OP_GPU ( " BatchMatrixTriangularSolve " , <nl> ( MatrixTriangularSolveOpGPU < double > ) , double ) ; <nl> <nl> - # endif / / GOOGLE_CUDA <nl> + # endif / / GOOGLE_CUDA | | TENSORFLOW_USE_ROCM <nl> <nl> } / / namespace tensorflow <nl>
Merge pull request from ROCmSoftwarePlatform : google_upstream_amtrix_triangular_solve_op
tensorflow/tensorflow
4e977877b974582d5f515300623b56ab59af4024
2019-05-21T14:37:12Z
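Editor's note: the TensorFlow commit above is a pure portability change: one preprocessor condition enables the GPU path for both CUDA and ROCm, and the vendor-specific name cuda_memory becomes gpu_memory. A condensed view of the pattern, assuming TensorFlow's stream_executor header is on the include path as in the file above:

#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/stream_executor.h"

// Same StreamExecutor-based helper now builds for either GPU backend.
template <typename Scalar>
se::DeviceMemory<Scalar> AsDeviceMemory(const Scalar* gpu_memory) {
  se::DeviceMemoryBase wrapped(const_cast<Scalar*>(gpu_memory));
  se::DeviceMemory<Scalar> typed(wrapped);
  return typed;
}
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM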
new file mode 100644 <nl> index 000000000000 . . a42f2c93e017 <nl> mmm / dev / null <nl> ppp b / ReLU6 . cu <nl> <nl> + # include " THCUNN . h " <nl> + # include " common . h " <nl> + <nl> + struct ReLU6UpdateOutput <nl> + { <nl> + ReLU6UpdateOutput ( ) { } <nl> + <nl> + __device__ __forceinline__ void operator ( ) ( float * out , float * in ) <nl> + { <nl> + float x = * in ; <nl> + * out = ( x > 0 ) ? ( ( x < 6 ) ? x : 6 ) : 0 ; <nl> + } <nl> + } ; <nl> + <nl> + / / in - place variant <nl> + struct ReLU6UpdateOutputIP <nl> + { <nl> + ReLU6UpdateOutputIP ( ) { } <nl> + <nl> + __device__ __forceinline__ void operator ( ) ( float * x ) <nl> + { <nl> + * x = ( * x > 0 ) ? ( ( * x < 6 ) ? * x : 6 ) : 0 ; <nl> + } <nl> + } ; <nl> + <nl> + void THNN_CudaReLU6_updateOutput ( THCState * state , THCudaTensor * input , THCudaTensor * output , <nl> + bool inplace ) <nl> + { <nl> + THCUNN_assertSameGPU ( state , 2 , input , output ) ; <nl> + <nl> + if ( inplace ) <nl> + { <nl> + THC_pointwiseApply1 ( state , input , <nl> + ReLU6UpdateOutputIP ( ) <nl> + ) ; <nl> + THCudaTensor_set ( state , output , input ) ; <nl> + } <nl> + else <nl> + { <nl> + THCudaTensor_resizeAs ( state , output , input ) ; <nl> + THC_pointwiseApply2 ( state , output , input , <nl> + ReLU6UpdateOutput ( ) <nl> + ) ; <nl> + } <nl> + <nl> + THCudaCheck ( cudaGetLastError ( ) ) ; <nl> + } <nl> + <nl> + struct ReLU6UpdateGradInput <nl> + { <nl> + ReLU6UpdateGradInput ( ) { } <nl> + <nl> + __device__ __forceinline__ void operator ( ) ( <nl> + float * gradInput , float * input , float * gradOutput ) const <nl> + { <nl> + * gradInput = ( * input > 0 & & * input < 6 ) ? * gradOutput : 0 ; <nl> + } <nl> + } ; <nl> + <nl> + struct ReLU6UpdateGradInputIP <nl> + { <nl> + ReLU6UpdateGradInputIP ( ) { } <nl> + <nl> + __device__ __forceinline__ void operator ( ) ( <nl> + float * gradOutput , float * input ) const <nl> + { <nl> + * gradOutput = ( * input > 0 & & * input < 6 ) ? * gradOutput : 0 ; <nl> + } <nl> + } ; <nl> + <nl> + void THNN_CudaReLU6_updateGradInput ( THCState * state , THCudaTensor * input , THCudaTensor * gradOutput , <nl> + THCudaTensor * gradInput , bool inplace ) <nl> + { <nl> + THCUNN_assertSameGPU ( state , 3 , input , gradInput , gradOutput ) ; <nl> + <nl> + if ( inplace ) <nl> + { <nl> + THC_pointwiseApply2 ( state , gradOutput , input , <nl> + ReLU6UpdateGradInputIP ( ) <nl> + ) ; <nl> + THCudaTensor_set ( state , gradInput , gradOutput ) ; <nl> + } <nl> + else <nl> + { <nl> + THCudaTensor_resizeAs ( state , gradInput , input ) ; <nl> + THC_pointwiseApply3 ( state , gradInput , input , gradOutput , <nl> + ReLU6UpdateGradInput ( ) <nl> + ) ; <nl> + } <nl> + <nl> + THCudaCheck ( cudaGetLastError ( ) ) ; <nl> + } <nl> mmm a / THCUNN . h <nl> ppp b / THCUNN . h <nl> TH_API void THNN_CudaThreshold_updateGradInput ( <nl> double threshold , <nl> bool inplace ) ; <nl> <nl> + TH_API void THNN_CudaReLU6_updateOutput ( <nl> + THCState * state , <nl> + THCudaTensor * input , <nl> + THCudaTensor * output , <nl> + bool inplace ) ; <nl> + TH_API void THNN_CudaReLU6_updateGradInput ( <nl> + THCState * state , <nl> + THCudaTensor * input , <nl> + THCudaTensor * gradOutput , <nl> + THCudaTensor * gradInput , <nl> + bool inplace ) ; <nl> + <nl> TH_API void THNN_CudaTemporalConvolution_updateOutput ( <nl> THCState * state , <nl> THCudaTensor * input , <nl>
Added ReLU6 implementation and test .
pytorch/pytorch
6161049a316d7a8b5f308c474069bf3f658f3856
2016-06-14T18:50:48Z
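Editor's note: the CUDA functors added above implement ReLU6 pointwise; a host-side C++ reference of the same semantics, matching ReLU6UpdateOutput and ReLU6UpdateGradInput:

#include <algorithm>

// Forward: clamp to [0, 6].
inline float relu6(float x) {
    return std::min(std::max(x, 0.0f), 6.0f);
}

// Gradient passes through only on the open interval (0, 6), matching
// ReLU6UpdateGradInput: grad_in = (x > 0 && x < 6) ? grad_out : 0.
inline float relu6_grad(float x, float grad_out) {
    return (x > 0.0f && x < 6.0f) ? grad_out : 0.0f;
}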
mmm a / lib / Sema / TypeCheckProtocol . cpp <nl> ppp b / lib / Sema / TypeCheckProtocol . cpp <nl> checkConformsToProtocol ( TypeChecker & TC , Type T , ProtocolDecl * Proto , <nl> / / Check that T conforms to all inherited protocols . <nl> for ( auto InheritedProto : Proto - > getProtocols ( ) ) { <nl> ProtocolConformance * InheritedConformance = nullptr ; <nl> - if ( TC . conformsToProtocol ( T , InheritedProto , DC , & InheritedConformance , <nl> + if ( TC . conformsToProtocol ( T , InheritedProto , DC , <nl> + & InheritedConformance , <nl> ComplainLoc , ExplicitConformance ) ) { <nl> if ( ! conformance - > hasInheritedConformance ( InheritedProto ) ) <nl> conformance - > setInheritedConformance ( InheritedProto , <nl> InheritedConformance ) ; <nl> } else { <nl> + if ( auto knownConformance = <nl> + TC . Context . getConformsTo ( canT , InheritedProto ) ) { <nl> + / / Check to see if the conformance is in an incomplete state . If it is , <nl> + / / the inherited protocol has an indirectly recursive requirement . <nl> + if ( knownConformance - > getInt ( ) & & <nl> + ( knownConformance - > getPointer ( ) - > getState ( ) = = <nl> + ProtocolConformanceState : : Incomplete ) ) { <nl> + TC . diagnose ( InheritedProto , diag : : recursive_requirement_reference ) ; <nl> + } <nl> + } <nl> / / Recursive call already diagnosed this problem , but tack on a note <nl> / / to establish the relationship . <nl> if ( ComplainLoc . isValid ( ) ) { <nl> checkConformsToProtocol ( TypeChecker & TC , Type T , ProtocolDecl * Proto , <nl> diag : : inherited_protocol_does_not_conform , T , <nl> InheritedProto - > getDeclaredType ( ) ) ; <nl> } <nl> - <nl> + <nl> conformance - > setState ( ProtocolConformanceState : : Invalid ) ; <nl> return conformance ; <nl> } <nl> new file mode 100644 <nl> index 000000000000 . . 8ea1f98700b0 <nl> mmm / dev / null <nl> ppp b / test / decl / protocol / indirectly_recursive_requirement . swift <nl> <nl> + / / RUN : % swift - parse % s - verify <nl> + <nl> + protocol Incrementable { <nl> + func succ ( ) - > Self <nl> + } <nl> + <nl> + protocol _ForwardIndex { / / expected - error { { type may not reference itself as a requirement } } <nl> + typealias DistanceType = MyInt <nl> + } <nl> + <nl> + protocol ForwardIndex : _ForwardIndex { <nl> + } <nl> + <nl> + protocol _BidirectionalIndex : _ForwardIndex { / / expected - note { { type ' MyInt ' does not conform to inherited protocol ' _ForwardIndex ' } } <nl> + func pred ( ) - > Self <nl> + } <nl> + <nl> + protocol BidirectionalIndex : ForwardIndex , _BidirectionalIndex { / / expected - note { { type ' MyInt ' does not conform to inherited protocol ' _BidirectionalIndex ' } } <nl> + } <nl> + <nl> + protocol _RandomAccessIndex : _BidirectionalIndex { <nl> + typealias DistanceType : _NumericOperations / / expected - note { { protocol requires nested type ' DistanceType ' } } <nl> + } <nl> + <nl> + protocol RandomAccessIndex : BidirectionalIndex , _RandomAccessIndex { / / expected - note { { type ' MyInt ' does not conform to inherited protocol ' _RandomAccessIndex ' } } <nl> + } <nl> + <nl> + <nl> + protocol _NumericOperations { <nl> + } <nl> + <nl> + struct MyInt : RandomAccessIndex / / expected - error { { type ' MyInt ' does not conform to protocol ' _RandomAccessIndex ' } } <nl> + { <nl> + <nl> + } <nl>
Following up on r14651 , begin flagging and raising an error on indirectly self - recursive protocol requirements . ( This addresses rdar : / / problem / 16081342 . )
apple/swift
2f294aaee35cdc92890eca55b9e1945688bee4ae
2014-03-11T23:42:59Z
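Editor's note: the Swift commit above detects indirectly recursive protocol requirements by noticing a conformance that is still in the Incomplete state while an inherited protocol is being checked. A generic sketch of that technique (not Swift's actual type checker): mark a protocol Incomplete on entry, Complete on exit, and treat a revisit of an Incomplete node as a cycle.

#include <map>
#include <stdexcept>
#include <string>
#include <vector>

enum class State { Incomplete, Complete };

struct Checker {
    std::map<std::string, std::vector<std::string>> inherited;  // proto -> parents
    std::map<std::string, State> state;

    void check(const std::string& proto) {
        auto it = state.find(proto);
        if (it != state.end()) {
            if (it->second == State::Incomplete)
                throw std::runtime_error(
                    "type may not reference itself as a requirement: " + proto);
            return;  // already fully checked
        }
        state[proto] = State::Incomplete;
        for (const auto& parent : inherited[proto])
            check(parent);
        state[proto] = State::Complete;
    }
};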
mmm a / src / DataStreams / TTLBlockInputStream . cpp <nl> ppp b / src / DataStreams / TTLBlockInputStream . cpp <nl> void TTLBlockInputStream : : removeValuesWithExpiredColumnTTL ( Block & block ) <nl> block . erase ( column ) ; <nl> } <nl> <nl> - void TTLBlockInputStream : : updateMovesTTL ( Block & block ) <nl> + void TTLBlockInputStream : : updateTTLWithDescriptions ( Block & block , const TTLDescriptions & descriptions , TTLInfoMap & ttl_info_map ) <nl> { <nl> std : : vector < String > columns_to_remove ; <nl> - for ( const auto & ttl_entry : metadata_snapshot - > getMoveTTLs ( ) ) <nl> + for ( const auto & ttl_entry : descriptions ) <nl> { <nl> - auto & new_ttl_info = new_ttl_infos . moves_ttl [ ttl_entry . result_column ] ; <nl> - <nl> + auto & new_ttl_info = ttl_info_map [ ttl_entry . result_column ] ; <nl> if ( ! block . has ( ttl_entry . result_column ) ) <nl> { <nl> columns_to_remove . push_back ( ttl_entry . result_column ) ; <nl> void TTLBlockInputStream : : updateMovesTTL ( Block & block ) <nl> block . erase ( column ) ; <nl> } <nl> <nl> + void TTLBlockInputStream : : updateMovesTTL ( Block & block ) <nl> + { <nl> + updateTTLWithDescriptions ( block , metadata_snapshot - > getMoveTTLs ( ) , new_ttl_infos . moves_ttl ) ; <nl> + } <nl> <nl> void TTLBlockInputStream : : updateRecompressionTTL ( Block & block ) <nl> { <nl> - std : : vector < String > columns_to_remove ; <nl> - for ( const auto & ttl_entry : metadata_snapshot - > getRecompressionTTLs ( ) ) <nl> - { <nl> - auto & new_ttl_info = new_ttl_infos . recompression_ttl [ ttl_entry . result_column ] ; <nl> - <nl> - if ( ! block . has ( ttl_entry . result_column ) ) <nl> - { <nl> - columns_to_remove . push_back ( ttl_entry . result_column ) ; <nl> - ttl_entry . expression - > execute ( block ) ; <nl> - } <nl> - <nl> - const IColumn * ttl_column = block . getByName ( ttl_entry . result_column ) . column . get ( ) ; <nl> - <nl> - for ( size_t i = 0 ; i < block . rows ( ) ; + + i ) <nl> - { <nl> - UInt32 cur_ttl = getTimestampByIndex ( ttl_column , i ) ; <nl> - new_ttl_info . update ( cur_ttl ) ; <nl> - } <nl> - } <nl> - <nl> - for ( const String & column : columns_to_remove ) <nl> - block . erase ( column ) ; <nl> + updateTTLWithDescriptions ( block , metadata_snapshot - > getRecompressionTTLs ( ) , new_ttl_infos . recompression_ttl ) ; <nl> } <nl> <nl> UInt32 TTLBlockInputStream : : getTimestampByIndex ( const IColumn * column , size_t ind ) <nl> mmm a / src / DataStreams / TTLBlockInputStream . h <nl> ppp b / src / DataStreams / TTLBlockInputStream . h <nl> <nl> # include < Storages / MergeTree / IMergeTreeDataPart . h > <nl> # include < Core / Block . h > <nl> # include < Interpreters / Aggregator . h > <nl> + # include < Storages / MergeTree / MergeTreeDataPartTTLInfo . h > <nl> <nl> # include < common / DateLUT . h > <nl> <nl> class TTLBlockInputStream : public IBlockInputStream <nl> / / / Finalize agg_result into result_columns <nl> void finalizeAggregates ( MutableColumns & result_columns ) ; <nl> <nl> + void updateTTLWithDescriptions ( Block & block , const TTLDescriptions & descriptions , TTLInfoMap & ttl_info_map ) ; <nl> + <nl> / / / Updates TTL for moves <nl> void updateMovesTTL ( Block & block ) ; <nl> <nl> mmm a / src / Storages / MergeTree / MergeTreeData . cpp <nl> ppp b / src / Storages / MergeTree / MergeTreeData . 
cpp <nl> CompressionCodecPtr MergeTreeData : : getCompressionCodecForPart ( size_t part_size_c <nl> auto metadata_snapshot = getInMemoryMetadataPtr ( ) ; <nl> <nl> const auto & recompression_ttl_entries = metadata_snapshot - > getRecompressionTTLs ( ) ; <nl> - LOG_DEBUG ( log , " RECOMPRESSION ENTRIES SIZE { } " , recompression_ttl_entries . size ( ) ) ; <nl> - LOG_DEBUG ( log , " TTL INFOS SIZE { } " , ttl_infos . recompression_ttl . size ( ) ) ; <nl> auto best_ttl_entry = selectTTLEntryForTTLInfos ( recompression_ttl_entries , ttl_infos . recompression_ttl , current_time , false ) ; <nl> <nl> if ( best_ttl_entry ) <nl> mmm a / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> ppp b / src / Storages / MergeTree / MergeTreeDataMergerMutator . cpp <nl> bool MergeTreeDataMergerMutator : : selectPartsToMerge ( <nl> return false ; <nl> } <nl> <nl> - / / LOG_DEBUG ( log , " SELECTING PARTS TO MERGE " ) ; <nl> time_t current_time = std : : time ( nullptr ) ; <nl> <nl> IMergeSelector : : PartsRanges parts_ranges ; <nl> bool MergeTreeDataMergerMutator : : selectPartsToMerge ( <nl> <nl> if ( metadata_snapshot - > hasAnyTTL ( ) & & merge_with_ttl_allowed & & ! ttl_merges_blocker . isCancelled ( ) ) <nl> { <nl> - <nl> - / / LOG_DEBUG ( log , " SELECTING WITH TTL " ) ; <nl> TTLDeleteMergeSelector delete_ttl_selector ( <nl> - next_ttl_merge_times_by_partition , <nl> + next_delete_ttl_merge_times_by_partition , <nl> current_time , <nl> data_settings - > merge_with_ttl_timeout , <nl> data_settings - > ttl_only_drop_parts ) ; <nl> bool MergeTreeDataMergerMutator : : selectPartsToMerge ( <nl> future_part . merge_type = MergeType : : TTL_DELETE ; <nl> else if ( metadata_snapshot - > hasAnyRecompressionTTL ( ) ) <nl> { <nl> - <nl> - / / LOG_DEBUG ( log , " SELECTING WITH RECOMPRESSION " ) ; <nl> TTLRecompressMergeSelector recompress_ttl_selector ( <nl> - next_ttl_merge_times_by_partition , <nl> + next_recompress_ttl_merge_times_by_partition , <nl> current_time , <nl> - data_settings - > merge_with_ttl_timeout , <nl> + data_settings - > merge_with_recompression_ttl_timeout , <nl> metadata_snapshot - > getRecompressionTTLs ( ) ) ; <nl> <nl> parts_to_merge = recompress_ttl_selector . select ( parts_ranges , max_total_size_to_merge ) ; <nl> if ( ! parts_to_merge . empty ( ) ) <nl> - { <nl> - / / LOG_DEBUG ( log , " SELECTED PARTS : { } " , parts_to_merge . size ( ) ) ; <nl> future_part . merge_type = MergeType : : TTL_RECOMPRESS ; <nl> - } <nl> } <nl> } <nl> <nl> mmm a / src / Storages / MergeTree / MergeTreeDataMergerMutator . h <nl> ppp b / src / Storages / MergeTree / MergeTreeDataMergerMutator . h <nl> public : <nl> time_t disk_space_warning_time = 0 ; <nl> <nl> / / / Stores the next TTL merge due time for each partition ( used only by TTLMergeSelector ) <nl> - ITTLMergeSelector : : PartitionIdToTTLs next_ttl_merge_times_by_partition ; <nl> + ITTLMergeSelector : : PartitionIdToTTLs next_delete_ttl_merge_times_by_partition ; <nl> <nl> + / / / Stores the next TTL merge due time for each partition ( used only by TTLMergeSelector ) <nl> + ITTLMergeSelector : : PartitionIdToTTLs next_recompress_ttl_merge_times_by_partition ; <nl> / / / Performing TTL merges independently for each partition guarantees that <nl> / / / there is only a limited number of TTL merges and no partition stores data , that is too stale <nl> } ; <nl> mmm a / src / Storages / MergeTree / MergeTreeSettings . h <nl> ppp b / src / Storages / MergeTree / MergeTreeSettings . 
h <nl> struct Settings ; <nl> M ( UInt64 , min_merge_bytes_to_use_direct_io , 10ULL * 1024 * 1024 * 1024 , " Minimal amount of bytes to enable O_DIRECT in merge ( 0 - disabled ) . " , 0 ) \ <nl> M ( UInt64 , index_granularity_bytes , 10 * 1024 * 1024 , " Approximate amount of bytes in single granule ( 0 - disabled ) . " , 0 ) \ <nl> M ( UInt64 , min_index_granularity_bytes , 1024 , " Minimum amount of bytes in single granule . " , 1024 ) \ <nl> - M ( Int64 , merge_with_ttl_timeout , 0 , " Minimal time in seconds , when merge with TTL can be repeated . " , 0 ) \ <nl> + M ( Int64 , merge_with_ttl_timeout , 3600 * 24 , " Minimal time in seconds , when merge with delete TTL can be repeated . " , 0 ) \ <nl> + M ( Int64 , merge_with_recompression_ttl_timeout , 3600 * 24 , " Minimal time in seconds , when merge with recompression TTL can be repeated . " , 0 ) \ <nl> M ( Bool , ttl_only_drop_parts , false , " Only drop altogether the expired parts and not partially prune them . " , 0 ) \ <nl> M ( Bool , write_final_mark , 1 , " Write final mark after end of column ( 0 - disabled , do nothing if index_granularity_bytes = 0 ) " , 0 ) \ <nl> M ( Bool , enable_mixed_granularity_parts , 1 , " Enable parts with adaptive and non adaptive granularity " , 0 ) \ <nl>
Less copypaste
ClickHouse/ClickHouse
fecb2f13115a1776e5fb9b1cd0f1c3f91e2c5ca5
2020-09-04T14:08:43Z
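Editor's note: the ClickHouse commit above ("Less copypaste") collapses two near-identical TTL update loops into one helper parameterized by the descriptions and the target info map. A generic sketch of the refactor shape, using hypothetical trimmed-down types in place of TTLDescription/TTLInfoMap:

#include <functional>
#include <map>
#include <string>
#include <vector>

struct TTLDescription { std::string result_column; };
struct TTLInfo { void update(unsigned /*ts*/) {} };
using TTLInfoMap = std::map<std::string, TTLInfo>;

// The move-TTL and recompression-TTL loops differed only in which
// descriptions they read and which info map they updated, so both become
// calls into one helper.
void updateTTLWithDescriptions(const std::vector<TTLDescription>& descriptions,
                               TTLInfoMap& ttl_info_map,
                               const std::function<unsigned(const std::string&)>& eval)
{
    for (const auto& entry : descriptions)
        ttl_info_map[entry.result_column].update(eval(entry.result_column));
}

// Callers then shrink to one line each, e.g.:
// updateTTLWithDescriptions(moveTTLs, infos.moves_ttl, eval);
// updateTTLWithDescriptions(recompressionTTLs, infos.recompression_ttl, eval);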
mmm a / src / Makefile . am <nl> ppp b / src / Makefile . am <nl> BITCOIN_CORE_H = \ <nl> versionbits . h \ <nl> versionbitsinfo . h \ <nl> walletinitinterface . h \ <nl> + wallet / bdb . h \ <nl> wallet / coincontrol . h \ <nl> wallet / context . h \ <nl> wallet / crypter . h \ <nl> libbitcoin_wallet_a_CPPFLAGS = $ ( AM_CPPFLAGS ) $ ( BITCOIN_INCLUDES ) <nl> libbitcoin_wallet_a_CXXFLAGS = $ ( AM_CXXFLAGS ) $ ( PIE_FLAGS ) <nl> libbitcoin_wallet_a_SOURCES = \ <nl> interfaces / wallet . cpp \ <nl> + wallet / bdb . cpp \ <nl> wallet / coincontrol . cpp \ <nl> wallet / context . cpp \ <nl> wallet / crypter . cpp \ <nl> mmm a / src / qt / rpcconsole . cpp <nl> ppp b / src / qt / rpcconsole . cpp <nl> <nl> # include < univalue . h > <nl> <nl> # ifdef ENABLE_WALLET <nl> + # include < wallet / bdb . h > <nl> # include < wallet / db . h > <nl> # include < wallet / wallet . h > <nl> # endif <nl> new file mode 100644 <nl> index 000000000000 . . 7ed9c88122aa <nl> mmm / dev / null <nl> ppp b / src / wallet / bdb . cpp <nl> <nl> + / / Copyright ( c ) 2009 - 2010 Satoshi Nakamoto <nl> + / / Copyright ( c ) 2009 - 2020 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # include < wallet / bdb . h > <nl> + # include < wallet / db . h > <nl> + <nl> + # include < util / strencodings . h > <nl> + # include < util / translation . h > <nl> + <nl> + # include < stdint . h > <nl> + <nl> + # ifndef WIN32 <nl> + # include < sys / stat . h > <nl> + # endif <nl> + <nl> + namespace { <nl> + <nl> + / / ! Make sure database has a unique fileid within the environment . If it <nl> + / / ! doesn ' t , throw an error . BDB caches do not work properly when more than one <nl> + / / ! open database has the same fileid ( values written to one database may show <nl> + / / ! up in reads to other databases ) . <nl> + / / ! <nl> + / / ! BerkeleyDB generates unique fileids by default <nl> + / / ! ( https : / / docs . oracle . com / cd / E17275_01 / html / programmer_reference / program_copy . html ) , <nl> + / / ! so bitcoin should never create different databases with the same fileid , but <nl> + / / ! this error can be triggered if users manually copy database files . <nl> + void CheckUniqueFileid ( const BerkeleyEnvironment & env , const std : : string & filename , Db & db , WalletDatabaseFileId & fileid ) <nl> + { <nl> + if ( env . IsMock ( ) ) return ; <nl> + <nl> + int ret = db . get_mpf ( ) - > get_fileid ( fileid . value ) ; <nl> + if ( ret ! = 0 ) { <nl> + throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Can ' t open database % s ( get_fileid failed with % d ) " , filename , ret ) ) ; <nl> + } <nl> + <nl> + for ( const auto & item : env . m_fileids ) { <nl> + if ( fileid = = item . second & & & fileid ! = & item . second ) { <nl> + throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Can ' t open database % s ( duplicates fileid % s from % s ) " , filename , <nl> + HexStr ( std : : begin ( item . second . value ) , std : : end ( item . second . value ) ) , item . first ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + RecursiveMutex cs_db ; <nl> + std : : map < std : : string , std : : weak_ptr < BerkeleyEnvironment > > g_dbenvs GUARDED_BY ( cs_db ) ; / / ! < Map from directory name to db environment . 
<nl> + } / / namespace <nl> + <nl> + bool WalletDatabaseFileId : : operator = = ( const WalletDatabaseFileId & rhs ) const <nl> + { <nl> + return memcmp ( value , & rhs . value , sizeof ( value ) ) = = 0 ; <nl> + } <nl> + <nl> + bool IsBDBWalletLoaded ( const fs : : path & wallet_path ) <nl> + { <nl> + fs : : path env_directory ; <nl> + std : : string database_filename ; <nl> + SplitWalletPath ( wallet_path , env_directory , database_filename ) ; <nl> + LOCK ( cs_db ) ; <nl> + auto env = g_dbenvs . find ( env_directory . string ( ) ) ; <nl> + if ( env = = g_dbenvs . end ( ) ) return false ; <nl> + auto database = env - > second . lock ( ) ; <nl> + return database & & database - > IsDatabaseLoaded ( database_filename ) ; <nl> + } <nl> + <nl> + / * * <nl> + * @ param [ in ] wallet_path Path to wallet directory . Or ( for backwards compatibility only ) a path to a berkeley btree data file inside a wallet directory . <nl> + * @ param [ out ] database_filename Filename of berkeley btree data file inside the wallet directory . <nl> + * @ return A shared pointer to the BerkeleyEnvironment object for the wallet directory , never empty because ~ BerkeleyEnvironment <nl> + * erases the weak pointer from the g_dbenvs map . <nl> + * @ post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map . <nl> + * / <nl> + std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename ) <nl> + { <nl> + fs : : path env_directory ; <nl> + SplitWalletPath ( wallet_path , env_directory , database_filename ) ; <nl> + LOCK ( cs_db ) ; <nl> + auto inserted = g_dbenvs . emplace ( env_directory . string ( ) , std : : weak_ptr < BerkeleyEnvironment > ( ) ) ; <nl> + if ( inserted . second ) { <nl> + auto env = std : : make_shared < BerkeleyEnvironment > ( env_directory . string ( ) ) ; <nl> + inserted . first - > second = env ; <nl> + return env ; <nl> + } <nl> + return inserted . first - > second . lock ( ) ; <nl> + } <nl> + <nl> + / / <nl> + / / BerkeleyBatch <nl> + / / <nl> + <nl> + void BerkeleyEnvironment : : Close ( ) <nl> + { <nl> + if ( ! fDbEnvInit ) <nl> + return ; <nl> + <nl> + fDbEnvInit = false ; <nl> + <nl> + for ( auto & db : m_databases ) { <nl> + auto count = mapFileUseCount . find ( db . first ) ; <nl> + assert ( count = = mapFileUseCount . end ( ) | | count - > second = = 0 ) ; <nl> + BerkeleyDatabase & database = db . second . get ( ) ; <nl> + if ( database . m_db ) { <nl> + database . m_db - > close ( 0 ) ; <nl> + database . m_db . reset ( ) ; <nl> + } <nl> + } <nl> + <nl> + FILE * error_file = nullptr ; <nl> + dbenv - > get_errfile ( & error_file ) ; <nl> + <nl> + int ret = dbenv - > close ( 0 ) ; <nl> + if ( ret ! = 0 ) <nl> + LogPrintf ( " BerkeleyEnvironment : : Close : Error % d closing database environment : % s \ n " , ret , DbEnv : : strerror ( ret ) ) ; <nl> + if ( ! fMockDb ) <nl> + DbEnv ( ( u_int32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ; <nl> + <nl> + if ( error_file ) fclose ( error_file ) ; <nl> + <nl> + UnlockDirectory ( strPath , " . walletlock " ) ; <nl> + } <nl> + <nl> + void BerkeleyEnvironment : : Reset ( ) <nl> + { <nl> + dbenv . reset ( new DbEnv ( DB_CXX_NO_EXCEPTIONS ) ) ; <nl> + fDbEnvInit = false ; <nl> + fMockDb = false ; <nl> + } <nl> + <nl> + BerkeleyEnvironment : : BerkeleyEnvironment ( const fs : : path & dir_path ) : strPath ( dir_path . 
string ( ) ) <nl> + { <nl> + Reset ( ) ; <nl> + } <nl> + <nl> + BerkeleyEnvironment : : ~ BerkeleyEnvironment ( ) <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + g_dbenvs . erase ( strPath ) ; <nl> + Close ( ) ; <nl> + } <nl> + <nl> + bool BerkeleyEnvironment : : Open ( bool retry ) <nl> + { <nl> + if ( fDbEnvInit ) { <nl> + return true ; <nl> + } <nl> + <nl> + fs : : path pathIn = strPath ; <nl> + TryCreateDirectories ( pathIn ) ; <nl> + if ( ! LockDirectory ( pathIn , " . walletlock " ) ) { <nl> + LogPrintf ( " Cannot obtain a lock on wallet directory % s . Another instance of bitcoin may be using it . \ n " , strPath ) ; <nl> + return false ; <nl> + } <nl> + <nl> + fs : : path pathLogDir = pathIn / " database " ; <nl> + TryCreateDirectories ( pathLogDir ) ; <nl> + fs : : path pathErrorFile = pathIn / " db . log " ; <nl> + LogPrintf ( " BerkeleyEnvironment : : Open : LogDir = % s ErrorFile = % s \ n " , pathLogDir . string ( ) , pathErrorFile . string ( ) ) ; <nl> + <nl> + unsigned int nEnvFlags = 0 ; <nl> + if ( gArgs . GetBoolArg ( " - privdb " , DEFAULT_WALLET_PRIVDB ) ) <nl> + nEnvFlags | = DB_PRIVATE ; <nl> + <nl> + dbenv - > set_lg_dir ( pathLogDir . string ( ) . c_str ( ) ) ; <nl> + dbenv - > set_cachesize ( 0 , 0x100000 , 1 ) ; / / 1 MiB should be enough for just the wallet <nl> + dbenv - > set_lg_bsize ( 0x10000 ) ; <nl> + dbenv - > set_lg_max ( 1048576 ) ; <nl> + dbenv - > set_lk_max_locks ( 40000 ) ; <nl> + dbenv - > set_lk_max_objects ( 40000 ) ; <nl> + dbenv - > set_errfile ( fsbridge : : fopen ( pathErrorFile , " a " ) ) ; / / / debug <nl> + dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ; <nl> + dbenv - > set_flags ( DB_TXN_WRITE_NOSYNC , 1 ) ; <nl> + dbenv - > log_set_config ( DB_LOG_AUTO_REMOVE , 1 ) ; <nl> + int ret = dbenv - > open ( strPath . c_str ( ) , <nl> + DB_CREATE | <nl> + DB_INIT_LOCK | <nl> + DB_INIT_LOG | <nl> + DB_INIT_MPOOL | <nl> + DB_INIT_TXN | <nl> + DB_THREAD | <nl> + DB_RECOVER | <nl> + nEnvFlags , <nl> + S_IRUSR | S_IWUSR ) ; <nl> + if ( ret ! = 0 ) { <nl> + LogPrintf ( " BerkeleyEnvironment : : Open : Error % d opening database environment : % s \ n " , ret , DbEnv : : strerror ( ret ) ) ; <nl> + int ret2 = dbenv - > close ( 0 ) ; <nl> + if ( ret2 ! = 0 ) { <nl> + LogPrintf ( " BerkeleyEnvironment : : Open : Error % d closing failed database environment : % s \ n " , ret2 , DbEnv : : strerror ( ret2 ) ) ; <nl> + } <nl> + Reset ( ) ; <nl> + if ( retry ) { <nl> + / / try moving the database env out of the way <nl> + fs : : path pathDatabaseBak = pathIn / strprintf ( " database . % d . bak " , GetTime ( ) ) ; <nl> + try { <nl> + fs : : rename ( pathLogDir , pathDatabaseBak ) ; <nl> + LogPrintf ( " Moved old % s to % s . Retrying . \ n " , pathLogDir . string ( ) , pathDatabaseBak . string ( ) ) ; <nl> + } catch ( const fs : : filesystem_error & ) { <nl> + / / failure is ok ( well , not really , but it ' s not worse than what we started with ) <nl> + } <nl> + / / try opening it again one more time <nl> + if ( ! Open ( false / * retry * / ) ) { <nl> + / / if it still fails , it probably means we can ' t even create the database env <nl> + return false ; <nl> + } <nl> + } else { <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + fDbEnvInit = true ; <nl> + fMockDb = false ; <nl> + return true ; <nl> + } <nl> + <nl> + / / ! 
Construct an in - memory mock Berkeley environment for testing <nl> + BerkeleyEnvironment : : BerkeleyEnvironment ( ) <nl> + { <nl> + Reset ( ) ; <nl> + <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : MakeMock \ n " ) ; <nl> + <nl> + dbenv - > set_cachesize ( 1 , 0 , 1 ) ; <nl> + dbenv - > set_lg_bsize ( 10485760 * 4 ) ; <nl> + dbenv - > set_lg_max ( 10485760 ) ; <nl> + dbenv - > set_lk_max_locks ( 10000 ) ; <nl> + dbenv - > set_lk_max_objects ( 10000 ) ; <nl> + dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ; <nl> + dbenv - > log_set_config ( DB_LOG_IN_MEMORY , 1 ) ; <nl> + int ret = dbenv - > open ( nullptr , <nl> + DB_CREATE | <nl> + DB_INIT_LOCK | <nl> + DB_INIT_LOG | <nl> + DB_INIT_MPOOL | <nl> + DB_INIT_TXN | <nl> + DB_THREAD | <nl> + DB_PRIVATE , <nl> + S_IRUSR | S_IWUSR ) ; <nl> + if ( ret > 0 ) { <nl> + throw std : : runtime_error ( strprintf ( " BerkeleyEnvironment : : MakeMock : Error % d opening database environment . " , ret ) ) ; <nl> + } <nl> + <nl> + fDbEnvInit = true ; <nl> + fMockDb = true ; <nl> + } <nl> + <nl> + bool BerkeleyEnvironment : : Verify ( const std : : string & strFile ) <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + assert ( mapFileUseCount . count ( strFile ) = = 0 ) ; <nl> + <nl> + Db db ( dbenv . get ( ) , 0 ) ; <nl> + int result = db . verify ( strFile . c_str ( ) , nullptr , nullptr , 0 ) ; <nl> + return result = = 0 ; <nl> + } <nl> + <nl> + BerkeleyBatch : : SafeDbt : : SafeDbt ( ) <nl> + { <nl> + m_dbt . set_flags ( DB_DBT_MALLOC ) ; <nl> + } <nl> + <nl> + BerkeleyBatch : : SafeDbt : : SafeDbt ( void * data , size_t size ) <nl> + : m_dbt ( data , size ) <nl> + { <nl> + } <nl> + <nl> + BerkeleyBatch : : SafeDbt : : ~ SafeDbt ( ) <nl> + { <nl> + if ( m_dbt . get_data ( ) ! = nullptr ) { <nl> + / / Clear memory , e . g . in case it was a private key <nl> + memory_cleanse ( m_dbt . get_data ( ) , m_dbt . get_size ( ) ) ; <nl> + / / under DB_DBT_MALLOC , data is malloced by the Dbt , but must be <nl> + / / freed by the caller . <nl> + / / https : / / docs . oracle . com / cd / E17275_01 / html / api_reference / C / dbt . html <nl> + if ( m_dbt . get_flags ( ) & DB_DBT_MALLOC ) { <nl> + free ( m_dbt . get_data ( ) ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + const void * BerkeleyBatch : : SafeDbt : : get_data ( ) const <nl> + { <nl> + return m_dbt . get_data ( ) ; <nl> + } <nl> + <nl> + u_int32_t BerkeleyBatch : : SafeDbt : : get_size ( ) const <nl> + { <nl> + return m_dbt . get_size ( ) ; <nl> + } <nl> + <nl> + BerkeleyBatch : : SafeDbt : : operator Dbt * ( ) <nl> + { <nl> + return & m_dbt ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : VerifyEnvironment ( const fs : : path & file_path , bilingual_str & errorStr ) <nl> + { <nl> + std : : string walletFile ; <nl> + std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ; <nl> + fs : : path walletDir = env - > Directory ( ) ; <nl> + <nl> + LogPrintf ( " Using BerkeleyDB version % s \ n " , BerkeleyDatabaseVersion ( ) ) ; <nl> + LogPrintf ( " Using wallet % s \ n " , file_path . string ( ) ) ; <nl> + <nl> + if ( ! env - > Open ( true / * retry * / ) ) { <nl> + errorStr = strprintf ( _ ( " Error initializing wallet database environment % s ! 
" ) , walletDir ) ; <nl> + return false ; <nl> + } <nl> + <nl> + return true ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : VerifyDatabaseFile ( const fs : : path & file_path , bilingual_str & errorStr ) <nl> + { <nl> + std : : string walletFile ; <nl> + std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ; <nl> + fs : : path walletDir = env - > Directory ( ) ; <nl> + <nl> + if ( fs : : exists ( walletDir / walletFile ) ) <nl> + { <nl> + if ( ! env - > Verify ( walletFile ) ) { <nl> + errorStr = strprintf ( _ ( " % s corrupt . Try using the wallet tool bitcoin - wallet to salvage or restoring a backup . " ) , walletFile ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + / / also return true if files does not exists <nl> + return true ; <nl> + } <nl> + <nl> + void BerkeleyEnvironment : : CheckpointLSN ( const std : : string & strFile ) <nl> + { <nl> + dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ; <nl> + if ( fMockDb ) <nl> + return ; <nl> + dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ; <nl> + } <nl> + <nl> + <nl> + BerkeleyBatch : : BerkeleyBatch ( BerkeleyDatabase & database , const char * pszMode , bool fFlushOnCloseIn ) : pdb ( nullptr ) , activeTxn ( nullptr ) <nl> + { <nl> + fReadOnly = ( ! strchr ( pszMode , ' + ' ) & & ! strchr ( pszMode , ' w ' ) ) ; <nl> + fFlushOnClose = fFlushOnCloseIn ; <nl> + env = database . env . get ( ) ; <nl> + if ( database . IsDummy ( ) ) { <nl> + return ; <nl> + } <nl> + const std : : string & strFilename = database . strFile ; <nl> + <nl> + bool fCreate = strchr ( pszMode , ' c ' ) ! = nullptr ; <nl> + unsigned int nFlags = DB_THREAD ; <nl> + if ( fCreate ) <nl> + nFlags | = DB_CREATE ; <nl> + <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + if ( ! env - > Open ( false / * retry * / ) ) <nl> + throw std : : runtime_error ( " BerkeleyBatch : Failed to open database environment . " ) ; <nl> + <nl> + pdb = database . m_db . get ( ) ; <nl> + if ( pdb = = nullptr ) { <nl> + int ret ; <nl> + std : : unique_ptr < Db > pdb_temp = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ; <nl> + <nl> + bool fMockDb = env - > IsMock ( ) ; <nl> + if ( fMockDb ) { <nl> + DbMpoolFile * mpf = pdb_temp - > get_mpf ( ) ; <nl> + ret = mpf - > set_flags ( DB_MPOOL_NOFILE , 1 ) ; <nl> + if ( ret ! = 0 ) { <nl> + throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Failed to configure for no temp file backing for database % s " , strFilename ) ) ; <nl> + } <nl> + } <nl> + <nl> + ret = pdb_temp - > open ( nullptr , / / Txn pointer <nl> + fMockDb ? nullptr : strFilename . c_str ( ) , / / Filename <nl> + fMockDb ? strFilename . c_str ( ) : " main " , / / Logical db name <nl> + DB_BTREE , / / Database type <nl> + nFlags , / / Flags <nl> + 0 ) ; <nl> + <nl> + if ( ret ! = 0 ) { <nl> + throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Error % d , can ' t open database % s " , ret , strFilename ) ) ; <nl> + } <nl> + <nl> + / / Call CheckUniqueFileid on the containing BDB environment to <nl> + / / avoid BDB data consistency bugs that happen when different data <nl> + / / files in the same environment have the same fileid . <nl> + / / <nl> + / / Also call CheckUniqueFileid on all the other g_dbenvs to prevent <nl> + / / bitcoin from opening the same data file through another <nl> + / / environment when the file is referenced through equivalent but <nl> + / / not obviously identical symlinked or hard linked or bind mounted <nl> + / / paths . 
In the future a more relaxed check for equal inode and <nl> + / / device ids could be done instead , which would allow opening <nl> + / / different backup copies of a wallet at the same time . Maybe even <nl> + / / more ideally , an exclusive lock for accessing the database could <nl> + / / be implemented , so no equality checks are needed at all . ( Newer <nl> + / / versions of BDB have a set_lk_exclusive method for this <nl> + / / purpose , but the older version we use does not . ) <nl> + for ( const auto & env : g_dbenvs ) { <nl> + CheckUniqueFileid ( * env . second . lock ( ) . get ( ) , strFilename , * pdb_temp , this - > env - > m_fileids [ strFilename ] ) ; <nl> + } <nl> + <nl> + pdb = pdb_temp . release ( ) ; <nl> + database . m_db . reset ( pdb ) ; <nl> + <nl> + if ( fCreate & & ! Exists ( std : : string ( " version " ) ) ) { <nl> + bool fTmp = fReadOnly ; <nl> + fReadOnly = false ; <nl> + Write ( std : : string ( " version " ) , CLIENT_VERSION ) ; <nl> + fReadOnly = fTmp ; <nl> + } <nl> + } <nl> + + + env - > mapFileUseCount [ strFilename ] ; <nl> + strFile = strFilename ; <nl> + } <nl> + } <nl> + <nl> + void BerkeleyBatch : : Flush ( ) <nl> + { <nl> + if ( activeTxn ) <nl> + return ; <nl> + <nl> + / / Flush database activity from memory pool to disk log <nl> + unsigned int nMinutes = 0 ; <nl> + if ( fReadOnly ) <nl> + nMinutes = 1 ; <nl> + <nl> + if ( env ) { / / env is nullptr for dummy databases ( i . e . in tests ) . Don ' t actually flush if env is nullptr so we don ' t segfault <nl> + env - > dbenv - > txn_checkpoint ( nMinutes ? gArgs . GetArg ( " - dblogsize " , DEFAULT_WALLET_DBLOGSIZE ) * 1024 : 0 , nMinutes , 0 ) ; <nl> + } <nl> + } <nl> + <nl> + void BerkeleyDatabase : : IncrementUpdateCounter ( ) <nl> + { <nl> + + + nUpdateCounter ; <nl> + } <nl> + <nl> + void BerkeleyBatch : : Close ( ) <nl> + { <nl> + if ( ! pdb ) <nl> + return ; <nl> + if ( activeTxn ) <nl> + activeTxn - > abort ( ) ; <nl> + activeTxn = nullptr ; <nl> + pdb = nullptr ; <nl> + <nl> + if ( fFlushOnClose ) <nl> + Flush ( ) ; <nl> + <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + - - env - > mapFileUseCount [ strFile ] ; <nl> + } <nl> + env - > m_db_in_use . notify_all ( ) ; <nl> + } <nl> + <nl> + void BerkeleyEnvironment : : CloseDb ( const std : : string & strFile ) <nl> + { <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + auto it = m_databases . find ( strFile ) ; <nl> + assert ( it ! = m_databases . end ( ) ) ; <nl> + BerkeleyDatabase & database = it - > second . get ( ) ; <nl> + if ( database . m_db ) { <nl> + / / Close the database handle <nl> + database . m_db - > close ( 0 ) ; <nl> + database . m_db . reset ( ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void BerkeleyEnvironment : : ReloadDbEnv ( ) <nl> + { <nl> + / / Make sure that no Db ' s are in use <nl> + AssertLockNotHeld ( cs_db ) ; <nl> + std : : unique_lock < RecursiveMutex > lock ( cs_db ) ; <nl> + m_db_in_use . wait ( lock , [ this ] ( ) { <nl> + for ( auto & count : mapFileUseCount ) { <nl> + if ( count . second > 0 ) return false ; <nl> + } <nl> + return true ; <nl> + } ) ; <nl> + <nl> + std : : vector < std : : string > filenames ; <nl> + for ( auto it : m_databases ) { <nl> + filenames . push_back ( it . 
first ) ; <nl> + } <nl> + / / Close the individual Db ' s <nl> + for ( const std : : string & filename : filenames ) { <nl> + CloseDb ( filename ) ; <nl> + } <nl> + / / Reset the environment <nl> + Flush ( true ) ; / / This will flush and close the environment <nl> + Reset ( ) ; <nl> + Open ( true ) ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : Rewrite ( BerkeleyDatabase & database , const char * pszSkip ) <nl> + { <nl> + if ( database . IsDummy ( ) ) { <nl> + return true ; <nl> + } <nl> + BerkeleyEnvironment * env = database . env . get ( ) ; <nl> + const std : : string & strFile = database . strFile ; <nl> + while ( true ) { <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 ) { <nl> + / / Flush log data to the dat file <nl> + env - > CloseDb ( strFile ) ; <nl> + env - > CheckpointLSN ( strFile ) ; <nl> + env - > mapFileUseCount . erase ( strFile ) ; <nl> + <nl> + bool fSuccess = true ; <nl> + LogPrintf ( " BerkeleyBatch : : Rewrite : Rewriting % s . . . \ n " , strFile ) ; <nl> + std : : string strFileRes = strFile + " . rewrite " ; <nl> + { / / surround usage of db with extra { } <nl> + BerkeleyBatch db ( database , " r " ) ; <nl> + std : : unique_ptr < Db > pdbCopy = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ; <nl> + <nl> + int ret = pdbCopy - > open ( nullptr , / / Txn pointer <nl> + strFileRes . c_str ( ) , / / Filename <nl> + " main " , / / Logical db name <nl> + DB_BTREE , / / Database type <nl> + DB_CREATE , / / Flags <nl> + 0 ) ; <nl> + if ( ret > 0 ) { <nl> + LogPrintf ( " BerkeleyBatch : : Rewrite : Can ' t create database file % s \ n " , strFileRes ) ; <nl> + fSuccess = false ; <nl> + } <nl> + <nl> + Dbc * pcursor = db . GetCursor ( ) ; <nl> + if ( pcursor ) <nl> + while ( fSuccess ) { <nl> + CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> + CDataStream ssValue ( SER_DISK , CLIENT_VERSION ) ; <nl> + int ret1 = db . ReadAtCursor ( pcursor , ssKey , ssValue ) ; <nl> + if ( ret1 = = DB_NOTFOUND ) { <nl> + pcursor - > close ( ) ; <nl> + break ; <nl> + } else if ( ret1 ! = 0 ) { <nl> + pcursor - > close ( ) ; <nl> + fSuccess = false ; <nl> + break ; <nl> + } <nl> + if ( pszSkip & & <nl> + strncmp ( ssKey . data ( ) , pszSkip , std : : min ( ssKey . size ( ) , strlen ( pszSkip ) ) ) = = 0 ) <nl> + continue ; <nl> + if ( strncmp ( ssKey . data ( ) , " \ x07version " , 8 ) = = 0 ) { <nl> + / / Update version : <nl> + ssValue . clear ( ) ; <nl> + ssValue < < CLIENT_VERSION ; <nl> + } <nl> + Dbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> + Dbt datValue ( ssValue . data ( ) , ssValue . size ( ) ) ; <nl> + int ret2 = pdbCopy - > put ( nullptr , & datKey , & datValue , DB_NOOVERWRITE ) ; <nl> + if ( ret2 > 0 ) <nl> + fSuccess = false ; <nl> + } <nl> + if ( fSuccess ) { <nl> + db . Close ( ) ; <nl> + env - > CloseDb ( strFile ) ; <nl> + if ( pdbCopy - > close ( 0 ) ) <nl> + fSuccess = false ; <nl> + } else { <nl> + pdbCopy - > close ( 0 ) ; <nl> + } <nl> + } <nl> + if ( fSuccess ) { <nl> + Db dbA ( env - > dbenv . get ( ) , 0 ) ; <nl> + if ( dbA . remove ( strFile . c_str ( ) , nullptr , 0 ) ) <nl> + fSuccess = false ; <nl> + Db dbB ( env - > dbenv . get ( ) , 0 ) ; <nl> + if ( dbB . rename ( strFileRes . c_str ( ) , nullptr , strFile . c_str ( ) , 0 ) ) <nl> + fSuccess = false ; <nl> + } <nl> + if ( ! 
fSuccess ) <nl> + LogPrintf ( " BerkeleyBatch : : Rewrite : Failed to rewrite database file % s \ n " , strFileRes ) ; <nl> + return fSuccess ; <nl> + } <nl> + } <nl> + UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ; <nl> + } <nl> + } <nl> + <nl> + <nl> + void BerkeleyEnvironment : : Flush ( bool fShutdown ) <nl> + { <nl> + int64_t nStart = GetTimeMillis ( ) ; <nl> + / / Flush log data to the actual data file on all files that are not in use <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : [ % s ] Flush ( % s ) % s \ n " , strPath , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " ) ; <nl> + if ( ! fDbEnvInit ) <nl> + return ; <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + std : : map < std : : string , int > : : iterator mi = mapFileUseCount . begin ( ) ; <nl> + while ( mi ! = mapFileUseCount . end ( ) ) { <nl> + std : : string strFile = ( * mi ) . first ; <nl> + int nRefCount = ( * mi ) . second ; <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : Flushing % s ( refcount = % d ) . . . \ n " , strFile , nRefCount ) ; <nl> + if ( nRefCount = = 0 ) { <nl> + / / Move log data to the dat file <nl> + CloseDb ( strFile ) ; <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s checkpoint \ n " , strFile ) ; <nl> + dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ; <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s detach \ n " , strFile ) ; <nl> + if ( ! fMockDb ) <nl> + dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ; <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s closed \ n " , strFile ) ; <nl> + mapFileUseCount . erase ( mi + + ) ; <nl> + } else <nl> + mi + + ; <nl> + } <nl> + LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : Flush ( % s ) % s took % 15dms \ n " , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " , GetTimeMillis ( ) - nStart ) ; <nl> + if ( fShutdown ) { <nl> + char * * listp ; <nl> + if ( mapFileUseCount . empty ( ) ) { <nl> + dbenv - > log_archive ( & listp , DB_ARCH_REMOVE ) ; <nl> + Close ( ) ; <nl> + if ( ! fMockDb ) { <nl> + fs : : remove_all ( fs : : path ( strPath ) / " database " ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + bool BerkeleyBatch : : PeriodicFlush ( BerkeleyDatabase & database ) <nl> + { <nl> + if ( database . IsDummy ( ) ) { <nl> + return true ; <nl> + } <nl> + bool ret = false ; <nl> + BerkeleyEnvironment * env = database . env . get ( ) ; <nl> + const std : : string & strFile = database . strFile ; <nl> + TRY_LOCK ( cs_db , lockDb ) ; <nl> + if ( lockDb ) <nl> + { <nl> + / / Don ' t do this if any databases are in use <nl> + int nRefCount = 0 ; <nl> + std : : map < std : : string , int > : : iterator mit = env - > mapFileUseCount . begin ( ) ; <nl> + while ( mit ! = env - > mapFileUseCount . end ( ) ) <nl> + { <nl> + nRefCount + = ( * mit ) . second ; <nl> + mit + + ; <nl> + } <nl> + <nl> + if ( nRefCount = = 0 ) <nl> + { <nl> + std : : map < std : : string , int > : : iterator mi = env - > mapFileUseCount . find ( strFile ) ; <nl> + if ( mi ! = env - > mapFileUseCount . end ( ) ) <nl> + { <nl> + LogPrint ( BCLog : : WALLETDB , " Flushing % s \ n " , strFile ) ; <nl> + int64_t nStart = GetTimeMillis ( ) ; <nl> + <nl> + / / Flush wallet file so it ' s self contained <nl> + env - > CloseDb ( strFile ) ; <nl> + env - > CheckpointLSN ( strFile ) ; <nl> + <nl> + env - > mapFileUseCount . 
erase ( mi + + ) ; <nl> + LogPrint ( BCLog : : WALLETDB , " Flushed % s % dms \ n " , strFile , GetTimeMillis ( ) - nStart ) ; <nl> + ret = true ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + return ret ; <nl> + } <nl> + <nl> + bool BerkeleyDatabase : : Rewrite ( const char * pszSkip ) <nl> + { <nl> + return BerkeleyBatch : : Rewrite ( * this , pszSkip ) ; <nl> + } <nl> + <nl> + bool BerkeleyDatabase : : Backup ( const std : : string & strDest ) const <nl> + { <nl> + if ( IsDummy ( ) ) { <nl> + return false ; <nl> + } <nl> + while ( true ) <nl> + { <nl> + { <nl> + LOCK ( cs_db ) ; <nl> + if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 ) <nl> + { <nl> + / / Flush log data to the dat file <nl> + env - > CloseDb ( strFile ) ; <nl> + env - > CheckpointLSN ( strFile ) ; <nl> + env - > mapFileUseCount . erase ( strFile ) ; <nl> + <nl> + / / Copy wallet file <nl> + fs : : path pathSrc = env - > Directory ( ) / strFile ; <nl> + fs : : path pathDest ( strDest ) ; <nl> + if ( fs : : is_directory ( pathDest ) ) <nl> + pathDest / = strFile ; <nl> + <nl> + try { <nl> + if ( fs : : equivalent ( pathSrc , pathDest ) ) { <nl> + LogPrintf ( " cannot backup to wallet source file % s \ n " , pathDest . string ( ) ) ; <nl> + return false ; <nl> + } <nl> + <nl> + fs : : copy_file ( pathSrc , pathDest , fs : : copy_option : : overwrite_if_exists ) ; <nl> + LogPrintf ( " copied % s to % s \ n " , strFile , pathDest . string ( ) ) ; <nl> + return true ; <nl> + } catch ( const fs : : filesystem_error & e ) { <nl> + LogPrintf ( " error copying % s to % s - % s \ n " , strFile , pathDest . string ( ) , fsbridge : : get_filesystem_error_message ( e ) ) ; <nl> + return false ; <nl> + } <nl> + } <nl> + } <nl> + UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ; <nl> + } <nl> + } <nl> + <nl> + void BerkeleyDatabase : : Flush ( bool shutdown ) <nl> + { <nl> + if ( ! IsDummy ( ) ) { <nl> + env - > Flush ( shutdown ) ; <nl> + if ( shutdown ) { <nl> + LOCK ( cs_db ) ; <nl> + g_dbenvs . erase ( env - > Directory ( ) . string ( ) ) ; <nl> + env = nullptr ; <nl> + } else { <nl> + / / TODO : To avoid g_dbenvs . erase erasing the environment prematurely after the <nl> + / / first database shutdown when multiple databases are open in the same <nl> + / / environment , should replace raw database ` env ` pointers with shared or weak <nl> + / / pointers , or else separate the database and environment shutdowns so <nl> + / / environments can be shut down after databases . <nl> + env - > m_fileids . erase ( strFile ) ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + void BerkeleyDatabase : : ReloadDbEnv ( ) <nl> + { <nl> + if ( ! IsDummy ( ) ) { <nl> + env - > ReloadDbEnv ( ) ; <nl> + } <nl> + } <nl> + <nl> + Dbc * BerkeleyBatch : : GetCursor ( ) <nl> + { <nl> + if ( ! pdb ) <nl> + return nullptr ; <nl> + Dbc * pcursor = nullptr ; <nl> + int ret = pdb - > cursor ( nullptr , & pcursor , 0 ) ; <nl> + if ( ret ! = 0 ) <nl> + return nullptr ; <nl> + return pcursor ; <nl> + } <nl> + <nl> + int BerkeleyBatch : : ReadAtCursor ( Dbc * pcursor , CDataStream & ssKey , CDataStream & ssValue ) <nl> + { <nl> + / / Read at cursor <nl> + SafeDbt datKey ; <nl> + SafeDbt datValue ; <nl> + int ret = pcursor - > get ( datKey , datValue , DB_NEXT ) ; <nl> + if ( ret ! = 0 ) <nl> + return ret ; <nl> + else if ( datKey . get_data ( ) = = nullptr | | datValue . get_data ( ) = = nullptr ) <nl> + return 99999 ; <nl> + <nl> + / / Convert to streams <nl> + ssKey . SetType ( SER_DISK ) ; <nl> + ssKey . 
clear ( ) ; <nl> + ssKey . write ( ( char * ) datKey . get_data ( ) , datKey . get_size ( ) ) ; <nl> + ssValue . SetType ( SER_DISK ) ; <nl> + ssValue . clear ( ) ; <nl> + ssValue . write ( ( char * ) datValue . get_data ( ) , datValue . get_size ( ) ) ; <nl> + return 0 ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : TxnBegin ( ) <nl> + { <nl> + if ( ! pdb | | activeTxn ) <nl> + return false ; <nl> + DbTxn * ptxn = env - > TxnBegin ( ) ; <nl> + if ( ! ptxn ) <nl> + return false ; <nl> + activeTxn = ptxn ; <nl> + return true ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : TxnCommit ( ) <nl> + { <nl> + if ( ! pdb | | ! activeTxn ) <nl> + return false ; <nl> + int ret = activeTxn - > commit ( 0 ) ; <nl> + activeTxn = nullptr ; <nl> + return ( ret = = 0 ) ; <nl> + } <nl> + <nl> + bool BerkeleyBatch : : TxnAbort ( ) <nl> + { <nl> + if ( ! pdb | | ! activeTxn ) <nl> + return false ; <nl> + int ret = activeTxn - > abort ( ) ; <nl> + activeTxn = nullptr ; <nl> + return ( ret = = 0 ) ; <nl> + } <nl> + <nl> + std : : string BerkeleyDatabaseVersion ( ) <nl> + { <nl> + return DbEnv : : version ( nullptr , nullptr , nullptr ) ; <nl> + } <nl> new file mode 100644 <nl> index 000000000000 . . 5ed364344b44 <nl> mmm / dev / null <nl> ppp b / src / wallet / bdb . h <nl> <nl> + / / Copyright ( c ) 2009 - 2010 Satoshi Nakamoto <nl> + / / Copyright ( c ) 2009 - 2020 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # ifndef BITCOIN_WALLET_BDB_H <nl> + # define BITCOIN_WALLET_BDB_H <nl> + <nl> + # include < clientversion . h > <nl> + # include < fs . h > <nl> + # include < serialize . h > <nl> + # include < streams . h > <nl> + # include < util / system . h > <nl> + # include < wallet / db . h > <nl> + <nl> + # include < atomic > <nl> + # include < map > <nl> + # include < memory > <nl> + # include < string > <nl> + # include < unordered_map > <nl> + # include < vector > <nl> + <nl> + # if defined ( __GNUC__ ) & & ! defined ( __clang__ ) <nl> + # pragma GCC diagnostic push <nl> + # pragma GCC diagnostic ignored " - Wsuggest - override " <nl> + # endif <nl> + # include < db_cxx . h > <nl> + # if defined ( __GNUC__ ) & & ! defined ( __clang__ ) <nl> + # pragma GCC diagnostic pop <nl> + # endif <nl> + <nl> + struct bilingual_str ; <nl> + <nl> + static const unsigned int DEFAULT_WALLET_DBLOGSIZE = 100 ; <nl> + static const bool DEFAULT_WALLET_PRIVDB = true ; <nl> + <nl> + struct WalletDatabaseFileId { <nl> + u_int8_t value [ DB_FILE_ID_LEN ] ; <nl> + bool operator = = ( const WalletDatabaseFileId & rhs ) const ; <nl> + } ; <nl> + <nl> + class BerkeleyDatabase ; <nl> + <nl> + class BerkeleyEnvironment <nl> + { <nl> + private : <nl> + bool fDbEnvInit ; <nl> + bool fMockDb ; <nl> + / / Don ' t change into fs : : path , as that can result in <nl> + / / shutdown problems / crashes caused by a statically initialized internal pointer . 
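[Editorial aside: an illustrative sketch, not part of this commit.] The comment just above points at the static initialization/destruction order problem: an environment can be reachable from static-storage objects, and fs::path (Boost-backed in this codebase) has historically carried static internal state of its own, so a fs::path member could be torn down after the state it depends on. The patch's mitigation is simply to store a plain std::string. A generic sketch of this class of hazard and a common companion idiom (construct-on-first-use) follows; every name below is hypothetical:

#include <cstdio>
#include <string>

// Hypothetical environment-like type: keeping the directory as std::string
// ties its teardown to nothing but the allocator, unlike a path type with
// hidden static internals.
struct EnvSketch {
    std::string strPath; // mirrors the member above: a string, not fs::path
};

// Construct-on-first-use: the object is created on the first call, so its
// initialization cannot race other statics; by contrast, namespace-scope
// globals in different translation units are constructed and destroyed in
// unspecified relative order, which is the crash scenario the comment warns
// about.
EnvSketch& GlobalEnv()
{
    static EnvSketch env{"/tmp/walletdir"}; // hypothetical directory
    return env;
}

int main()
{
    std::printf("%s\n", GlobalEnv().strPath.c_str());
    return 0;
}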
<nl> + std : : string strPath ; <nl> + <nl> + public : <nl> + std : : unique_ptr < DbEnv > dbenv ; <nl> + std : : map < std : : string , int > mapFileUseCount ; <nl> + std : : map < std : : string , std : : reference_wrapper < BerkeleyDatabase > > m_databases ; <nl> + std : : unordered_map < std : : string , WalletDatabaseFileId > m_fileids ; <nl> + std : : condition_variable_any m_db_in_use ; <nl> + <nl> + BerkeleyEnvironment ( const fs : : path & env_directory ) ; <nl> + BerkeleyEnvironment ( ) ; <nl> + ~ BerkeleyEnvironment ( ) ; <nl> + void Reset ( ) ; <nl> + <nl> + bool IsMock ( ) const { return fMockDb ; } <nl> + bool IsInitialized ( ) const { return fDbEnvInit ; } <nl> + bool IsDatabaseLoaded ( const std : : string & db_filename ) const { return m_databases . find ( db_filename ) ! = m_databases . end ( ) ; } <nl> + fs : : path Directory ( ) const { return strPath ; } <nl> + <nl> + bool Verify ( const std : : string & strFile ) ; <nl> + <nl> + bool Open ( bool retry ) ; <nl> + void Close ( ) ; <nl> + void Flush ( bool fShutdown ) ; <nl> + void CheckpointLSN ( const std : : string & strFile ) ; <nl> + <nl> + void CloseDb ( const std : : string & strFile ) ; <nl> + void ReloadDbEnv ( ) ; <nl> + <nl> + DbTxn * TxnBegin ( int flags = DB_TXN_WRITE_NOSYNC ) <nl> + { <nl> + DbTxn * ptxn = nullptr ; <nl> + int ret = dbenv - > txn_begin ( nullptr , & ptxn , flags ) ; <nl> + if ( ! ptxn | | ret ! = 0 ) <nl> + return nullptr ; <nl> + return ptxn ; <nl> + } <nl> + } ; <nl> + <nl> + / * * Get BerkeleyEnvironment and database filename given a wallet path . * / <nl> + std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename ) ; <nl> + <nl> + / * * Return whether a BDB wallet database is currently loaded . * / <nl> + bool IsBDBWalletLoaded ( const fs : : path & wallet_path ) ; <nl> + <nl> + / * * An instance of this class represents one database . <nl> + * For BerkeleyDB this is just a ( env , strFile ) tuple . <nl> + * * / <nl> + class BerkeleyDatabase <nl> + { <nl> + friend class BerkeleyBatch ; <nl> + public : <nl> + / * * Create dummy DB handle * / <nl> + BerkeleyDatabase ( ) : nUpdateCounter ( 0 ) , nLastSeen ( 0 ) , nLastFlushed ( 0 ) , nLastWalletUpdate ( 0 ) , env ( nullptr ) <nl> + { <nl> + } <nl> + <nl> + / * * Create DB handle to real database * / <nl> + BerkeleyDatabase ( std : : shared_ptr < BerkeleyEnvironment > env , std : : string filename ) : <nl> + nUpdateCounter ( 0 ) , nLastSeen ( 0 ) , nLastFlushed ( 0 ) , nLastWalletUpdate ( 0 ) , env ( std : : move ( env ) ) , strFile ( std : : move ( filename ) ) <nl> + { <nl> + auto inserted = this - > env - > m_databases . emplace ( strFile , std : : ref ( * this ) ) ; <nl> + assert ( inserted . second ) ; <nl> + } <nl> + <nl> + ~ BerkeleyDatabase ( ) { <nl> + if ( env ) { <nl> + size_t erased = env - > m_databases . erase ( strFile ) ; <nl> + assert ( erased = = 1 ) ; <nl> + } <nl> + } <nl> + <nl> + / * * Return object for accessing database at specified path . * / <nl> + static std : : unique_ptr < BerkeleyDatabase > Create ( const fs : : path & path ) <nl> + { <nl> + std : : string filename ; <nl> + return MakeUnique < BerkeleyDatabase > ( GetWalletEnv ( path , filename ) , std : : move ( filename ) ) ; <nl> + } <nl> + <nl> + / * * Return object for accessing dummy database with no read / write capabilities . 
* / <nl> + static std : : unique_ptr < BerkeleyDatabase > CreateDummy ( ) <nl> + { <nl> + return MakeUnique < BerkeleyDatabase > ( ) ; <nl> + } <nl> + <nl> + / * * Return object for accessing temporary in - memory database . * / <nl> + static std : : unique_ptr < BerkeleyDatabase > CreateMock ( ) <nl> + { <nl> + return MakeUnique < BerkeleyDatabase > ( std : : make_shared < BerkeleyEnvironment > ( ) , " " ) ; <nl> + } <nl> + <nl> + / * * Rewrite the entire database on disk , with the exception of key pszSkip if non - zero <nl> + * / <nl> + bool Rewrite ( const char * pszSkip = nullptr ) ; <nl> + <nl> + / * * Back up the entire database to a file . <nl> + * / <nl> + bool Backup ( const std : : string & strDest ) const ; <nl> + <nl> + / * * Make sure all changes are flushed to disk . <nl> + * / <nl> + void Flush ( bool shutdown ) ; <nl> + <nl> + void IncrementUpdateCounter ( ) ; <nl> + <nl> + void ReloadDbEnv ( ) ; <nl> + <nl> + std : : atomic < unsigned int > nUpdateCounter ; <nl> + unsigned int nLastSeen ; <nl> + unsigned int nLastFlushed ; <nl> + int64_t nLastWalletUpdate ; <nl> + <nl> + / * * <nl> + * Pointer to shared database environment . <nl> + * <nl> + * Normally there is only one BerkeleyDatabase object per <nl> + * BerkeleyEnvironment , but in the special , backwards compatible case where <nl> + * multiple wallet BDB data files are loaded from the same directory , this <nl> + * will point to a shared instance that gets freed when the last data file <nl> + * is closed . <nl> + * / <nl> + std : : shared_ptr < BerkeleyEnvironment > env ; <nl> + <nl> + / * * Database pointer . This is initialized lazily and reset during flushes , so it can be null . * / <nl> + std : : unique_ptr < Db > m_db ; <nl> + <nl> + private : <nl> + std : : string strFile ; <nl> + <nl> + / * * Return whether this database handle is a dummy for testing . <nl> + * Only to be used at a low level , applications should ideally not care <nl> + * about this . 
<nl> + * / <nl> + bool IsDummy ( ) const { return env = = nullptr ; } <nl> + } ; <nl> + <nl> + / * * RAII class that provides access to a Berkeley database * / <nl> + class BerkeleyBatch <nl> + { <nl> + / * * RAII class that automatically cleanses its data on destruction * / <nl> + class SafeDbt final <nl> + { <nl> + Dbt m_dbt ; <nl> + <nl> + public : <nl> + / / construct Dbt with internally - managed data <nl> + SafeDbt ( ) ; <nl> + / / construct Dbt with provided data <nl> + SafeDbt ( void * data , size_t size ) ; <nl> + ~ SafeDbt ( ) ; <nl> + <nl> + / / delegate to Dbt <nl> + const void * get_data ( ) const ; <nl> + u_int32_t get_size ( ) const ; <nl> + <nl> + / / conversion operator to access the underlying Dbt <nl> + operator Dbt * ( ) ; <nl> + } ; <nl> + <nl> + protected : <nl> + Db * pdb ; <nl> + std : : string strFile ; <nl> + DbTxn * activeTxn ; <nl> + bool fReadOnly ; <nl> + bool fFlushOnClose ; <nl> + BerkeleyEnvironment * env ; <nl> + <nl> + public : <nl> + explicit BerkeleyBatch ( BerkeleyDatabase & database , const char * pszMode = " r + " , bool fFlushOnCloseIn = true ) ; <nl> + ~ BerkeleyBatch ( ) { Close ( ) ; } <nl> + <nl> + BerkeleyBatch ( const BerkeleyBatch & ) = delete ; <nl> + BerkeleyBatch & operator = ( const BerkeleyBatch & ) = delete ; <nl> + <nl> + void Flush ( ) ; <nl> + void Close ( ) ; <nl> + <nl> + / * flush the wallet passively ( TRY_LOCK ) <nl> + ideal to be called periodically * / <nl> + static bool PeriodicFlush ( BerkeleyDatabase & database ) ; <nl> + / * verifies the database environment * / <nl> + static bool VerifyEnvironment ( const fs : : path & file_path , bilingual_str & errorStr ) ; <nl> + / * verifies the database file * / <nl> + static bool VerifyDatabaseFile ( const fs : : path & file_path , bilingual_str & errorStr ) ; <nl> + <nl> + template < typename K , typename T > <nl> + bool Read ( const K & key , T & value ) <nl> + { <nl> + if ( ! pdb ) <nl> + return false ; <nl> + <nl> + / / Key <nl> + CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> + ssKey . reserve ( 1000 ) ; <nl> + ssKey < < key ; <nl> + SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> + <nl> + / / Read <nl> + SafeDbt datValue ; <nl> + int ret = pdb - > get ( activeTxn , datKey , datValue , 0 ) ; <nl> + bool success = false ; <nl> + if ( datValue . get_data ( ) ! = nullptr ) { <nl> + / / Unserialize value <nl> + try { <nl> + CDataStream ssValue ( ( char * ) datValue . get_data ( ) , ( char * ) datValue . get_data ( ) + datValue . get_size ( ) , SER_DISK , CLIENT_VERSION ) ; <nl> + ssValue > > value ; <nl> + success = true ; <nl> + } catch ( const std : : exception & ) { <nl> + / / In this case success remains ' false ' <nl> + } <nl> + } <nl> + return ret = = 0 & & success ; <nl> + } <nl> + <nl> + template < typename K , typename T > <nl> + bool Write ( const K & key , const T & value , bool fOverwrite = true ) <nl> + { <nl> + if ( ! pdb ) <nl> + return true ; <nl> + if ( fReadOnly ) <nl> + assert ( ! " Write called on database in read - only mode " ) ; <nl> + <nl> + / / Key <nl> + CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> + ssKey . reserve ( 1000 ) ; <nl> + ssKey < < key ; <nl> + SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> + <nl> + / / Value <nl> + CDataStream ssValue ( SER_DISK , CLIENT_VERSION ) ; <nl> + ssValue . reserve ( 10000 ) ; <nl> + ssValue < < value ; <nl> + SafeDbt datValue ( ssValue . data ( ) , ssValue . size ( ) ) ; <nl> + <nl> + / / Write <nl> + int ret = pdb - > put ( activeTxn , datKey , datValue , ( fOverwrite ? 
0 : DB_NOOVERWRITE ) ) ; <nl> + return ( ret = = 0 ) ; <nl> + } <nl> + <nl> + template < typename K > <nl> + bool Erase ( const K & key ) <nl> + { <nl> + if ( ! pdb ) <nl> + return false ; <nl> + if ( fReadOnly ) <nl> + assert ( ! " Erase called on database in read - only mode " ) ; <nl> + <nl> + / / Key <nl> + CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> + ssKey . reserve ( 1000 ) ; <nl> + ssKey < < key ; <nl> + SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> + <nl> + / / Erase <nl> + int ret = pdb - > del ( activeTxn , datKey , 0 ) ; <nl> + return ( ret = = 0 | | ret = = DB_NOTFOUND ) ; <nl> + } <nl> + <nl> + template < typename K > <nl> + bool Exists ( const K & key ) <nl> + { <nl> + if ( ! pdb ) <nl> + return false ; <nl> + <nl> + / / Key <nl> + CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> + ssKey . reserve ( 1000 ) ; <nl> + ssKey < < key ; <nl> + SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> + <nl> + / / Exists <nl> + int ret = pdb - > exists ( activeTxn , datKey , 0 ) ; <nl> + return ( ret = = 0 ) ; <nl> + } <nl> + <nl> + Dbc * GetCursor ( ) ; <nl> + int ReadAtCursor ( Dbc * pcursor , CDataStream & ssKey , CDataStream & ssValue ) ; <nl> + bool TxnBegin ( ) ; <nl> + bool TxnCommit ( ) ; <nl> + bool TxnAbort ( ) ; <nl> + <nl> + bool static Rewrite ( BerkeleyDatabase & database , const char * pszSkip = nullptr ) ; <nl> + } ; <nl> + <nl> + std : : string BerkeleyDatabaseVersion ( ) ; <nl> + <nl> + # endif / / BITCOIN_WALLET_BDB_H <nl> mmm a / src / wallet / db . cpp <nl> ppp b / src / wallet / db . cpp <nl> <nl> / / Distributed under the MIT software license , see the accompanying <nl> / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> <nl> + # include < fs . h > <nl> # include < wallet / db . h > <nl> <nl> - # include < util / strencodings . h > <nl> - # include < util / translation . h > <nl> + # include < string > <nl> <nl> - # include < stdint . h > <nl> - <nl> - # ifndef WIN32 <nl> - # include < sys / stat . h > <nl> - # endif <nl> - <nl> - namespace { <nl> - <nl> - / / ! Make sure database has a unique fileid within the environment . If it <nl> - / / ! doesn ' t , throw an error . BDB caches do not work properly when more than one <nl> - / / ! open database has the same fileid ( values written to one database may show <nl> - / / ! up in reads to other databases ) . <nl> - / / ! <nl> - / / ! BerkeleyDB generates unique fileids by default <nl> - / / ! ( https : / / docs . oracle . com / cd / E17275_01 / html / programmer_reference / program_copy . html ) , <nl> - / / ! so bitcoin should never create different databases with the same fileid , but <nl> - / / ! this error can be triggered if users manually copy database files . <nl> - void CheckUniqueFileid ( const BerkeleyEnvironment & env , const std : : string & filename , Db & db , WalletDatabaseFileId & fileid ) <nl> - { <nl> - if ( env . IsMock ( ) ) return ; <nl> - <nl> - int ret = db . get_mpf ( ) - > get_fileid ( fileid . value ) ; <nl> - if ( ret ! = 0 ) { <nl> - throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Can ' t open database % s ( get_fileid failed with % d ) " , filename , ret ) ) ; <nl> - } <nl> - <nl> - for ( const auto & item : env . m_fileids ) { <nl> - if ( fileid = = item . second & & & fileid ! = & item . second ) { <nl> - throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Can ' t open database % s ( duplicates fileid % s from % s ) " , filename , <nl> - HexStr ( std : : begin ( item . second . 
value ) , std : : end ( item . second . value ) ) , item . first ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - RecursiveMutex cs_db ; <nl> - std : : map < std : : string , std : : weak_ptr < BerkeleyEnvironment > > g_dbenvs GUARDED_BY ( cs_db ) ; / / ! < Map from directory name to db environment . <nl> - } / / namespace <nl> - <nl> - bool WalletDatabaseFileId : : operator = = ( const WalletDatabaseFileId & rhs ) const <nl> - { <nl> - return memcmp ( value , & rhs . value , sizeof ( value ) ) = = 0 ; <nl> - } <nl> - <nl> - static void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory , std : : string & database_filename ) <nl> + void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory , std : : string & database_filename ) <nl> { <nl> if ( fs : : is_regular_file ( wallet_path ) ) { <nl> / / Special case for backwards compatibility : if wallet path points to an <nl> static void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory <nl> } <nl> } <nl> <nl> - bool IsWalletLoaded ( const fs : : path & wallet_path ) <nl> - { <nl> - fs : : path env_directory ; <nl> - std : : string database_filename ; <nl> - SplitWalletPath ( wallet_path , env_directory , database_filename ) ; <nl> - LOCK ( cs_db ) ; <nl> - auto env = g_dbenvs . find ( env_directory . string ( ) ) ; <nl> - if ( env = = g_dbenvs . end ( ) ) return false ; <nl> - auto database = env - > second . lock ( ) ; <nl> - return database & & database - > IsDatabaseLoaded ( database_filename ) ; <nl> - } <nl> - <nl> fs : : path WalletDataFilePath ( const fs : : path & wallet_path ) <nl> { <nl> fs : : path env_directory ; <nl> fs : : path WalletDataFilePath ( const fs : : path & wallet_path ) <nl> SplitWalletPath ( wallet_path , env_directory , database_filename ) ; <nl> return env_directory / database_filename ; <nl> } <nl> - <nl> - / * * <nl> - * @ param [ in ] wallet_path Path to wallet directory . Or ( for backwards compatibility only ) a path to a berkeley btree data file inside a wallet directory . <nl> - * @ param [ out ] database_filename Filename of berkeley btree data file inside the wallet directory . <nl> - * @ return A shared pointer to the BerkeleyEnvironment object for the wallet directory , never empty because ~ BerkeleyEnvironment <nl> - * erases the weak pointer from the g_dbenvs map . <nl> - * @ post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map . <nl> - * / <nl> - std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename ) <nl> - { <nl> - fs : : path env_directory ; <nl> - SplitWalletPath ( wallet_path , env_directory , database_filename ) ; <nl> - LOCK ( cs_db ) ; <nl> - auto inserted = g_dbenvs . emplace ( env_directory . string ( ) , std : : weak_ptr < BerkeleyEnvironment > ( ) ) ; <nl> - if ( inserted . second ) { <nl> - auto env = std : : make_shared < BerkeleyEnvironment > ( env_directory . string ( ) ) ; <nl> - inserted . first - > second = env ; <nl> - return env ; <nl> - } <nl> - return inserted . first - > second . lock ( ) ; <nl> - } <nl> - <nl> - / / <nl> - / / BerkeleyBatch <nl> - / / <nl> - <nl> - void BerkeleyEnvironment : : Close ( ) <nl> - { <nl> - if ( ! fDbEnvInit ) <nl> - return ; <nl> - <nl> - fDbEnvInit = false ; <nl> - <nl> - for ( auto & db : m_databases ) { <nl> - auto count = mapFileUseCount . find ( db . first ) ; <nl> - assert ( count = = mapFileUseCount . 
end ( ) | | count - > second = = 0 ) ; <nl> - BerkeleyDatabase & database = db . second . get ( ) ; <nl> - if ( database . m_db ) { <nl> - database . m_db - > close ( 0 ) ; <nl> - database . m_db . reset ( ) ; <nl> - } <nl> - } <nl> - <nl> - FILE * error_file = nullptr ; <nl> - dbenv - > get_errfile ( & error_file ) ; <nl> - <nl> - int ret = dbenv - > close ( 0 ) ; <nl> - if ( ret ! = 0 ) <nl> - LogPrintf ( " BerkeleyEnvironment : : Close : Error % d closing database environment : % s \ n " , ret , DbEnv : : strerror ( ret ) ) ; <nl> - if ( ! fMockDb ) <nl> - DbEnv ( ( u_int32_t ) 0 ) . remove ( strPath . c_str ( ) , 0 ) ; <nl> - <nl> - if ( error_file ) fclose ( error_file ) ; <nl> - <nl> - UnlockDirectory ( strPath , " . walletlock " ) ; <nl> - } <nl> - <nl> - void BerkeleyEnvironment : : Reset ( ) <nl> - { <nl> - dbenv . reset ( new DbEnv ( DB_CXX_NO_EXCEPTIONS ) ) ; <nl> - fDbEnvInit = false ; <nl> - fMockDb = false ; <nl> - } <nl> - <nl> - BerkeleyEnvironment : : BerkeleyEnvironment ( const fs : : path & dir_path ) : strPath ( dir_path . string ( ) ) <nl> - { <nl> - Reset ( ) ; <nl> - } <nl> - <nl> - BerkeleyEnvironment : : ~ BerkeleyEnvironment ( ) <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - g_dbenvs . erase ( strPath ) ; <nl> - Close ( ) ; <nl> - } <nl> - <nl> - bool BerkeleyEnvironment : : Open ( bool retry ) <nl> - { <nl> - if ( fDbEnvInit ) { <nl> - return true ; <nl> - } <nl> - <nl> - fs : : path pathIn = strPath ; <nl> - TryCreateDirectories ( pathIn ) ; <nl> - if ( ! LockDirectory ( pathIn , " . walletlock " ) ) { <nl> - LogPrintf ( " Cannot obtain a lock on wallet directory % s . Another instance of bitcoin may be using it . \ n " , strPath ) ; <nl> - return false ; <nl> - } <nl> - <nl> - fs : : path pathLogDir = pathIn / " database " ; <nl> - TryCreateDirectories ( pathLogDir ) ; <nl> - fs : : path pathErrorFile = pathIn / " db . log " ; <nl> - LogPrintf ( " BerkeleyEnvironment : : Open : LogDir = % s ErrorFile = % s \ n " , pathLogDir . string ( ) , pathErrorFile . string ( ) ) ; <nl> - <nl> - unsigned int nEnvFlags = 0 ; <nl> - if ( gArgs . GetBoolArg ( " - privdb " , DEFAULT_WALLET_PRIVDB ) ) <nl> - nEnvFlags | = DB_PRIVATE ; <nl> - <nl> - dbenv - > set_lg_dir ( pathLogDir . string ( ) . c_str ( ) ) ; <nl> - dbenv - > set_cachesize ( 0 , 0x100000 , 1 ) ; / / 1 MiB should be enough for just the wallet <nl> - dbenv - > set_lg_bsize ( 0x10000 ) ; <nl> - dbenv - > set_lg_max ( 1048576 ) ; <nl> - dbenv - > set_lk_max_locks ( 40000 ) ; <nl> - dbenv - > set_lk_max_objects ( 40000 ) ; <nl> - dbenv - > set_errfile ( fsbridge : : fopen ( pathErrorFile , " a " ) ) ; / / / debug <nl> - dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ; <nl> - dbenv - > set_flags ( DB_TXN_WRITE_NOSYNC , 1 ) ; <nl> - dbenv - > log_set_config ( DB_LOG_AUTO_REMOVE , 1 ) ; <nl> - int ret = dbenv - > open ( strPath . c_str ( ) , <nl> - DB_CREATE | <nl> - DB_INIT_LOCK | <nl> - DB_INIT_LOG | <nl> - DB_INIT_MPOOL | <nl> - DB_INIT_TXN | <nl> - DB_THREAD | <nl> - DB_RECOVER | <nl> - nEnvFlags , <nl> - S_IRUSR | S_IWUSR ) ; <nl> - if ( ret ! = 0 ) { <nl> - LogPrintf ( " BerkeleyEnvironment : : Open : Error % d opening database environment : % s \ n " , ret , DbEnv : : strerror ( ret ) ) ; <nl> - int ret2 = dbenv - > close ( 0 ) ; <nl> - if ( ret2 ! 
= 0 ) { <nl> - LogPrintf ( " BerkeleyEnvironment : : Open : Error % d closing failed database environment : % s \ n " , ret2 , DbEnv : : strerror ( ret2 ) ) ; <nl> - } <nl> - Reset ( ) ; <nl> - if ( retry ) { <nl> - / / try moving the database env out of the way <nl> - fs : : path pathDatabaseBak = pathIn / strprintf ( " database . % d . bak " , GetTime ( ) ) ; <nl> - try { <nl> - fs : : rename ( pathLogDir , pathDatabaseBak ) ; <nl> - LogPrintf ( " Moved old % s to % s . Retrying . \ n " , pathLogDir . string ( ) , pathDatabaseBak . string ( ) ) ; <nl> - } catch ( const fs : : filesystem_error & ) { <nl> - / / failure is ok ( well , not really , but it ' s not worse than what we started with ) <nl> - } <nl> - / / try opening it again one more time <nl> - if ( ! Open ( false / * retry * / ) ) { <nl> - / / if it still fails , it probably means we can ' t even create the database env <nl> - return false ; <nl> - } <nl> - } else { <nl> - return false ; <nl> - } <nl> - } <nl> - <nl> - fDbEnvInit = true ; <nl> - fMockDb = false ; <nl> - return true ; <nl> - } <nl> - <nl> - / / ! Construct an in - memory mock Berkeley environment for testing <nl> - BerkeleyEnvironment : : BerkeleyEnvironment ( ) <nl> - { <nl> - Reset ( ) ; <nl> - <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : MakeMock \ n " ) ; <nl> - <nl> - dbenv - > set_cachesize ( 1 , 0 , 1 ) ; <nl> - dbenv - > set_lg_bsize ( 10485760 * 4 ) ; <nl> - dbenv - > set_lg_max ( 10485760 ) ; <nl> - dbenv - > set_lk_max_locks ( 10000 ) ; <nl> - dbenv - > set_lk_max_objects ( 10000 ) ; <nl> - dbenv - > set_flags ( DB_AUTO_COMMIT , 1 ) ; <nl> - dbenv - > log_set_config ( DB_LOG_IN_MEMORY , 1 ) ; <nl> - int ret = dbenv - > open ( nullptr , <nl> - DB_CREATE | <nl> - DB_INIT_LOCK | <nl> - DB_INIT_LOG | <nl> - DB_INIT_MPOOL | <nl> - DB_INIT_TXN | <nl> - DB_THREAD | <nl> - DB_PRIVATE , <nl> - S_IRUSR | S_IWUSR ) ; <nl> - if ( ret > 0 ) { <nl> - throw std : : runtime_error ( strprintf ( " BerkeleyEnvironment : : MakeMock : Error % d opening database environment . " , ret ) ) ; <nl> - } <nl> - <nl> - fDbEnvInit = true ; <nl> - fMockDb = true ; <nl> - } <nl> - <nl> - bool BerkeleyEnvironment : : Verify ( const std : : string & strFile ) <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - assert ( mapFileUseCount . count ( strFile ) = = 0 ) ; <nl> - <nl> - Db db ( dbenv . get ( ) , 0 ) ; <nl> - int result = db . verify ( strFile . c_str ( ) , nullptr , nullptr , 0 ) ; <nl> - return result = = 0 ; <nl> - } <nl> - <nl> - BerkeleyBatch : : SafeDbt : : SafeDbt ( ) <nl> - { <nl> - m_dbt . set_flags ( DB_DBT_MALLOC ) ; <nl> - } <nl> - <nl> - BerkeleyBatch : : SafeDbt : : SafeDbt ( void * data , size_t size ) <nl> - : m_dbt ( data , size ) <nl> - { <nl> - } <nl> - <nl> - BerkeleyBatch : : SafeDbt : : ~ SafeDbt ( ) <nl> - { <nl> - if ( m_dbt . get_data ( ) ! = nullptr ) { <nl> - / / Clear memory , e . g . in case it was a private key <nl> - memory_cleanse ( m_dbt . get_data ( ) , m_dbt . get_size ( ) ) ; <nl> - / / under DB_DBT_MALLOC , data is malloced by the Dbt , but must be <nl> - / / freed by the caller . <nl> - / / https : / / docs . oracle . com / cd / E17275_01 / html / api_reference / C / dbt . html <nl> - if ( m_dbt . get_flags ( ) & DB_DBT_MALLOC ) { <nl> - free ( m_dbt . get_data ( ) ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - const void * BerkeleyBatch : : SafeDbt : : get_data ( ) const <nl> - { <nl> - return m_dbt . get_data ( ) ; <nl> - } <nl> - <nl> - u_int32_t BerkeleyBatch : : SafeDbt : : get_size ( ) const <nl> - { <nl> - return m_dbt . 
get_size ( ) ; <nl> - } <nl> - <nl> - BerkeleyBatch : : SafeDbt : : operator Dbt * ( ) <nl> - { <nl> - return & m_dbt ; <nl> - } <nl> - <nl> - bool BerkeleyBatch : : VerifyEnvironment ( const fs : : path & file_path , bilingual_str & errorStr ) <nl> - { <nl> - std : : string walletFile ; <nl> - std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ; <nl> - fs : : path walletDir = env - > Directory ( ) ; <nl> - <nl> - LogPrintf ( " Using BerkeleyDB version % s \ n " , BerkeleyDatabaseVersion ( ) ) ; <nl> - LogPrintf ( " Using wallet % s \ n " , file_path . string ( ) ) ; <nl> - <nl> - if ( ! env - > Open ( true / * retry * / ) ) { <nl> - errorStr = strprintf ( _ ( " Error initializing wallet database environment % s ! " ) , walletDir ) ; <nl> - return false ; <nl> - } <nl> - <nl> - return true ; <nl> - } <nl> - <nl> - bool BerkeleyBatch : : VerifyDatabaseFile ( const fs : : path & file_path , bilingual_str & errorStr ) <nl> - { <nl> - std : : string walletFile ; <nl> - std : : shared_ptr < BerkeleyEnvironment > env = GetWalletEnv ( file_path , walletFile ) ; <nl> - fs : : path walletDir = env - > Directory ( ) ; <nl> - <nl> - if ( fs : : exists ( walletDir / walletFile ) ) <nl> - { <nl> - if ( ! env - > Verify ( walletFile ) ) { <nl> - errorStr = strprintf ( _ ( " % s corrupt . Try using the wallet tool bitcoin - wallet to salvage or restoring a backup . " ) , walletFile ) ; <nl> - return false ; <nl> - } <nl> - } <nl> - / / also return true if files does not exists <nl> - return true ; <nl> - } <nl> - <nl> - void BerkeleyEnvironment : : CheckpointLSN ( const std : : string & strFile ) <nl> - { <nl> - dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ; <nl> - if ( fMockDb ) <nl> - return ; <nl> - dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ; <nl> - } <nl> - <nl> - <nl> - BerkeleyBatch : : BerkeleyBatch ( BerkeleyDatabase & database , const char * pszMode , bool fFlushOnCloseIn ) : pdb ( nullptr ) , activeTxn ( nullptr ) <nl> - { <nl> - fReadOnly = ( ! strchr ( pszMode , ' + ' ) & & ! strchr ( pszMode , ' w ' ) ) ; <nl> - fFlushOnClose = fFlushOnCloseIn ; <nl> - env = database . env . get ( ) ; <nl> - if ( database . IsDummy ( ) ) { <nl> - return ; <nl> - } <nl> - const std : : string & strFilename = database . strFile ; <nl> - <nl> - bool fCreate = strchr ( pszMode , ' c ' ) ! = nullptr ; <nl> - unsigned int nFlags = DB_THREAD ; <nl> - if ( fCreate ) <nl> - nFlags | = DB_CREATE ; <nl> - <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - if ( ! env - > Open ( false / * retry * / ) ) <nl> - throw std : : runtime_error ( " BerkeleyBatch : Failed to open database environment . " ) ; <nl> - <nl> - pdb = database . m_db . get ( ) ; <nl> - if ( pdb = = nullptr ) { <nl> - int ret ; <nl> - std : : unique_ptr < Db > pdb_temp = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ; <nl> - <nl> - bool fMockDb = env - > IsMock ( ) ; <nl> - if ( fMockDb ) { <nl> - DbMpoolFile * mpf = pdb_temp - > get_mpf ( ) ; <nl> - ret = mpf - > set_flags ( DB_MPOOL_NOFILE , 1 ) ; <nl> - if ( ret ! = 0 ) { <nl> - throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Failed to configure for no temp file backing for database % s " , strFilename ) ) ; <nl> - } <nl> - } <nl> - <nl> - ret = pdb_temp - > open ( nullptr , / / Txn pointer <nl> - fMockDb ? nullptr : strFilename . c_str ( ) , / / Filename <nl> - fMockDb ? strFilename . c_str ( ) : " main " , / / Logical db name <nl> - DB_BTREE , / / Database type <nl> - nFlags , / / Flags <nl> - 0 ) ; <nl> - <nl> - if ( ret ! 
= 0 ) { <nl> - throw std : : runtime_error ( strprintf ( " BerkeleyBatch : Error % d , can ' t open database % s " , ret , strFilename ) ) ; <nl> - } <nl> - <nl> - / / Call CheckUniqueFileid on the containing BDB environment to <nl> - / / avoid BDB data consistency bugs that happen when different data <nl> - / / files in the same environment have the same fileid . <nl> - / / <nl> - / / Also call CheckUniqueFileid on all the other g_dbenvs to prevent <nl> - / / bitcoin from opening the same data file through another <nl> - / / environment when the file is referenced through equivalent but <nl> - / / not obviously identical symlinked or hard linked or bind mounted <nl> - / / paths . In the future a more relaxed check for equal inode and <nl> - / / device ids could be done instead , which would allow opening <nl> - / / different backup copies of a wallet at the same time . Maybe even <nl> - / / more ideally , an exclusive lock for accessing the database could <nl> - / / be implemented , so no equality checks are needed at all . ( Newer <nl> - / / versions of BDB have an set_lk_exclusive method for this <nl> - / / purpose , but the older version we use does not . ) <nl> - for ( const auto & env : g_dbenvs ) { <nl> - CheckUniqueFileid ( * env . second . lock ( ) . get ( ) , strFilename , * pdb_temp , this - > env - > m_fileids [ strFilename ] ) ; <nl> - } <nl> - <nl> - pdb = pdb_temp . release ( ) ; <nl> - database . m_db . reset ( pdb ) ; <nl> - <nl> - if ( fCreate & & ! Exists ( std : : string ( " version " ) ) ) { <nl> - bool fTmp = fReadOnly ; <nl> - fReadOnly = false ; <nl> - Write ( std : : string ( " version " ) , CLIENT_VERSION ) ; <nl> - fReadOnly = fTmp ; <nl> - } <nl> - } <nl> - + + env - > mapFileUseCount [ strFilename ] ; <nl> - strFile = strFilename ; <nl> - } <nl> - } <nl> - <nl> - void BerkeleyBatch : : Flush ( ) <nl> - { <nl> - if ( activeTxn ) <nl> - return ; <nl> - <nl> - / / Flush database activity from memory pool to disk log <nl> - unsigned int nMinutes = 0 ; <nl> - if ( fReadOnly ) <nl> - nMinutes = 1 ; <nl> - <nl> - if ( env ) { / / env is nullptr for dummy databases ( i . e . in tests ) . Don ' t actually flush if env is nullptr so we don ' t segfault <nl> - env - > dbenv - > txn_checkpoint ( nMinutes ? gArgs . GetArg ( " - dblogsize " , DEFAULT_WALLET_DBLOGSIZE ) * 1024 : 0 , nMinutes , 0 ) ; <nl> - } <nl> - } <nl> - <nl> - void BerkeleyDatabase : : IncrementUpdateCounter ( ) <nl> - { <nl> - + + nUpdateCounter ; <nl> - } <nl> - <nl> - void BerkeleyBatch : : Close ( ) <nl> - { <nl> - if ( ! pdb ) <nl> - return ; <nl> - if ( activeTxn ) <nl> - activeTxn - > abort ( ) ; <nl> - activeTxn = nullptr ; <nl> - pdb = nullptr ; <nl> - <nl> - if ( fFlushOnClose ) <nl> - Flush ( ) ; <nl> - <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - - - env - > mapFileUseCount [ strFile ] ; <nl> - } <nl> - env - > m_db_in_use . notify_all ( ) ; <nl> - } <nl> - <nl> - void BerkeleyEnvironment : : CloseDb ( const std : : string & strFile ) <nl> - { <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - auto it = m_databases . find ( strFile ) ; <nl> - assert ( it ! = m_databases . end ( ) ) ; <nl> - BerkeleyDatabase & database = it - > second . get ( ) ; <nl> - if ( database . m_db ) { <nl> - / / Close the database handle <nl> - database . m_db - > close ( 0 ) ; <nl> - database . m_db . 
reset ( ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void BerkeleyEnvironment : : ReloadDbEnv ( ) <nl> - { <nl> - / / Make sure that no Db ' s are in use <nl> - AssertLockNotHeld ( cs_db ) ; <nl> - std : : unique_lock < RecursiveMutex > lock ( cs_db ) ; <nl> - m_db_in_use . wait ( lock , [ this ] ( ) { <nl> - for ( auto & count : mapFileUseCount ) { <nl> - if ( count . second > 0 ) return false ; <nl> - } <nl> - return true ; <nl> - } ) ; <nl> - <nl> - std : : vector < std : : string > filenames ; <nl> - for ( auto it : m_databases ) { <nl> - filenames . push_back ( it . first ) ; <nl> - } <nl> - / / Close the individual Db ' s <nl> - for ( const std : : string & filename : filenames ) { <nl> - CloseDb ( filename ) ; <nl> - } <nl> - / / Reset the environment <nl> - Flush ( true ) ; / / This will flush and close the environment <nl> - Reset ( ) ; <nl> - Open ( true ) ; <nl> - } <nl> - <nl> - bool BerkeleyBatch : : Rewrite ( BerkeleyDatabase & database , const char * pszSkip ) <nl> - { <nl> - if ( database . IsDummy ( ) ) { <nl> - return true ; <nl> - } <nl> - BerkeleyEnvironment * env = database . env . get ( ) ; <nl> - const std : : string & strFile = database . strFile ; <nl> - while ( true ) { <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 ) { <nl> - / / Flush log data to the dat file <nl> - env - > CloseDb ( strFile ) ; <nl> - env - > CheckpointLSN ( strFile ) ; <nl> - env - > mapFileUseCount . erase ( strFile ) ; <nl> - <nl> - bool fSuccess = true ; <nl> - LogPrintf ( " BerkeleyBatch : : Rewrite : Rewriting % s . . . \ n " , strFile ) ; <nl> - std : : string strFileRes = strFile + " . rewrite " ; <nl> - { / / surround usage of db with extra { } <nl> - BerkeleyBatch db ( database , " r " ) ; <nl> - std : : unique_ptr < Db > pdbCopy = MakeUnique < Db > ( env - > dbenv . get ( ) , 0 ) ; <nl> - <nl> - int ret = pdbCopy - > open ( nullptr , / / Txn pointer <nl> - strFileRes . c_str ( ) , / / Filename <nl> - " main " , / / Logical db name <nl> - DB_BTREE , / / Database type <nl> - DB_CREATE , / / Flags <nl> - 0 ) ; <nl> - if ( ret > 0 ) { <nl> - LogPrintf ( " BerkeleyBatch : : Rewrite : Can ' t create database file % s \ n " , strFileRes ) ; <nl> - fSuccess = false ; <nl> - } <nl> - <nl> - Dbc * pcursor = db . GetCursor ( ) ; <nl> - if ( pcursor ) <nl> - while ( fSuccess ) { <nl> - CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> - CDataStream ssValue ( SER_DISK , CLIENT_VERSION ) ; <nl> - int ret1 = db . ReadAtCursor ( pcursor , ssKey , ssValue ) ; <nl> - if ( ret1 = = DB_NOTFOUND ) { <nl> - pcursor - > close ( ) ; <nl> - break ; <nl> - } else if ( ret1 ! = 0 ) { <nl> - pcursor - > close ( ) ; <nl> - fSuccess = false ; <nl> - break ; <nl> - } <nl> - if ( pszSkip & & <nl> - strncmp ( ssKey . data ( ) , pszSkip , std : : min ( ssKey . size ( ) , strlen ( pszSkip ) ) ) = = 0 ) <nl> - continue ; <nl> - if ( strncmp ( ssKey . data ( ) , " \ x07version " , 8 ) = = 0 ) { <nl> - / / Update version : <nl> - ssValue . clear ( ) ; <nl> - ssValue < < CLIENT_VERSION ; <nl> - } <nl> - Dbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> - Dbt datValue ( ssValue . data ( ) , ssValue . size ( ) ) ; <nl> - int ret2 = pdbCopy - > put ( nullptr , & datKey , & datValue , DB_NOOVERWRITE ) ; <nl> - if ( ret2 > 0 ) <nl> - fSuccess = false ; <nl> - } <nl> - if ( fSuccess ) { <nl> - db . 
Close ( ) ; <nl> - env - > CloseDb ( strFile ) ; <nl> - if ( pdbCopy - > close ( 0 ) ) <nl> - fSuccess = false ; <nl> - } else { <nl> - pdbCopy - > close ( 0 ) ; <nl> - } <nl> - } <nl> - if ( fSuccess ) { <nl> - Db dbA ( env - > dbenv . get ( ) , 0 ) ; <nl> - if ( dbA . remove ( strFile . c_str ( ) , nullptr , 0 ) ) <nl> - fSuccess = false ; <nl> - Db dbB ( env - > dbenv . get ( ) , 0 ) ; <nl> - if ( dbB . rename ( strFileRes . c_str ( ) , nullptr , strFile . c_str ( ) , 0 ) ) <nl> - fSuccess = false ; <nl> - } <nl> - if ( ! fSuccess ) <nl> - LogPrintf ( " BerkeleyBatch : : Rewrite : Failed to rewrite database file % s \ n " , strFileRes ) ; <nl> - return fSuccess ; <nl> - } <nl> - } <nl> - UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ; <nl> - } <nl> - } <nl> - <nl> - <nl> - void BerkeleyEnvironment : : Flush ( bool fShutdown ) <nl> - { <nl> - int64_t nStart = GetTimeMillis ( ) ; <nl> - / / Flush log data to the actual data file on all files that are not in use <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : [ % s ] Flush ( % s ) % s \ n " , strPath , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " ) ; <nl> - if ( ! fDbEnvInit ) <nl> - return ; <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - std : : map < std : : string , int > : : iterator mi = mapFileUseCount . begin ( ) ; <nl> - while ( mi ! = mapFileUseCount . end ( ) ) { <nl> - std : : string strFile = ( * mi ) . first ; <nl> - int nRefCount = ( * mi ) . second ; <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : Flushing % s ( refcount = % d ) . . . \ n " , strFile , nRefCount ) ; <nl> - if ( nRefCount = = 0 ) { <nl> - / / Move log data to the dat file <nl> - CloseDb ( strFile ) ; <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s checkpoint \ n " , strFile ) ; <nl> - dbenv - > txn_checkpoint ( 0 , 0 , 0 ) ; <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s detach \ n " , strFile ) ; <nl> - if ( ! fMockDb ) <nl> - dbenv - > lsn_reset ( strFile . c_str ( ) , 0 ) ; <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : % s closed \ n " , strFile ) ; <nl> - mapFileUseCount . erase ( mi + + ) ; <nl> - } else <nl> - mi + + ; <nl> - } <nl> - LogPrint ( BCLog : : WALLETDB , " BerkeleyEnvironment : : Flush : Flush ( % s ) % s took % 15dms \ n " , fShutdown ? " true " : " false " , fDbEnvInit ? " " : " database not started " , GetTimeMillis ( ) - nStart ) ; <nl> - if ( fShutdown ) { <nl> - char * * listp ; <nl> - if ( mapFileUseCount . empty ( ) ) { <nl> - dbenv - > log_archive ( & listp , DB_ARCH_REMOVE ) ; <nl> - Close ( ) ; <nl> - if ( ! fMockDb ) { <nl> - fs : : remove_all ( fs : : path ( strPath ) / " database " ) ; <nl> - } <nl> - } <nl> - } <nl> - } <nl> - } <nl> - <nl> - bool BerkeleyBatch : : PeriodicFlush ( BerkeleyDatabase & database ) <nl> - { <nl> - if ( database . IsDummy ( ) ) { <nl> - return true ; <nl> - } <nl> - bool ret = false ; <nl> - BerkeleyEnvironment * env = database . env . get ( ) ; <nl> - const std : : string & strFile = database . strFile ; <nl> - TRY_LOCK ( cs_db , lockDb ) ; <nl> - if ( lockDb ) <nl> - { <nl> - / / Don ' t do this if any databases are in use <nl> - int nRefCount = 0 ; <nl> - std : : map < std : : string , int > : : iterator mit = env - > mapFileUseCount . begin ( ) ; <nl> - while ( mit ! = env - > mapFileUseCount . end ( ) ) <nl> - { <nl> - nRefCount + = ( * mit ) . 
second ; <nl> - mit + + ; <nl> - } <nl> - <nl> - if ( nRefCount = = 0 ) <nl> - { <nl> - std : : map < std : : string , int > : : iterator mi = env - > mapFileUseCount . find ( strFile ) ; <nl> - if ( mi ! = env - > mapFileUseCount . end ( ) ) <nl> - { <nl> - LogPrint ( BCLog : : WALLETDB , " Flushing % s \ n " , strFile ) ; <nl> - int64_t nStart = GetTimeMillis ( ) ; <nl> - <nl> - / / Flush wallet file so it ' s self contained <nl> - env - > CloseDb ( strFile ) ; <nl> - env - > CheckpointLSN ( strFile ) ; <nl> - <nl> - env - > mapFileUseCount . erase ( mi + + ) ; <nl> - LogPrint ( BCLog : : WALLETDB , " Flushed % s % dms \ n " , strFile , GetTimeMillis ( ) - nStart ) ; <nl> - ret = true ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - return ret ; <nl> - } <nl> - <nl> - bool BerkeleyDatabase : : Rewrite ( const char * pszSkip ) <nl> - { <nl> - return BerkeleyBatch : : Rewrite ( * this , pszSkip ) ; <nl> - } <nl> - <nl> - bool BerkeleyDatabase : : Backup ( const std : : string & strDest ) const <nl> - { <nl> - if ( IsDummy ( ) ) { <nl> - return false ; <nl> - } <nl> - while ( true ) <nl> - { <nl> - { <nl> - LOCK ( cs_db ) ; <nl> - if ( ! env - > mapFileUseCount . count ( strFile ) | | env - > mapFileUseCount [ strFile ] = = 0 ) <nl> - { <nl> - / / Flush log data to the dat file <nl> - env - > CloseDb ( strFile ) ; <nl> - env - > CheckpointLSN ( strFile ) ; <nl> - env - > mapFileUseCount . erase ( strFile ) ; <nl> - <nl> - / / Copy wallet file <nl> - fs : : path pathSrc = env - > Directory ( ) / strFile ; <nl> - fs : : path pathDest ( strDest ) ; <nl> - if ( fs : : is_directory ( pathDest ) ) <nl> - pathDest / = strFile ; <nl> - <nl> - try { <nl> - if ( fs : : equivalent ( pathSrc , pathDest ) ) { <nl> - LogPrintf ( " cannot backup to wallet source file % s \ n " , pathDest . string ( ) ) ; <nl> - return false ; <nl> - } <nl> - <nl> - fs : : copy_file ( pathSrc , pathDest , fs : : copy_option : : overwrite_if_exists ) ; <nl> - LogPrintf ( " copied % s to % s \ n " , strFile , pathDest . string ( ) ) ; <nl> - return true ; <nl> - } catch ( const fs : : filesystem_error & e ) { <nl> - LogPrintf ( " error copying % s to % s - % s \ n " , strFile , pathDest . string ( ) , fsbridge : : get_filesystem_error_message ( e ) ) ; <nl> - return false ; <nl> - } <nl> - } <nl> - } <nl> - UninterruptibleSleep ( std : : chrono : : milliseconds { 100 } ) ; <nl> - } <nl> - } <nl> - <nl> - void BerkeleyDatabase : : Flush ( bool shutdown ) <nl> - { <nl> - if ( ! IsDummy ( ) ) { <nl> - env - > Flush ( shutdown ) ; <nl> - if ( shutdown ) { <nl> - LOCK ( cs_db ) ; <nl> - g_dbenvs . erase ( env - > Directory ( ) . string ( ) ) ; <nl> - env = nullptr ; <nl> - } else { <nl> - / / TODO : To avoid g_dbenvs . erase erasing the environment prematurely after the <nl> - / / first database shutdown when multiple databases are open in the same <nl> - / / environment , should replace raw database ` env ` pointers with shared or weak <nl> - / / pointers , or else separate the database and environment shutdowns so <nl> - / / environments can be shut down after databases . <nl> - env - > m_fileids . erase ( strFile ) ; <nl> - } <nl> - } <nl> - } <nl> - <nl> - void BerkeleyDatabase : : ReloadDbEnv ( ) <nl> - { <nl> - if ( ! IsDummy ( ) ) { <nl> - env - > ReloadDbEnv ( ) ; <nl> - } <nl> - } <nl> - <nl> - std : : string BerkeleyDatabaseVersion ( ) <nl> - { <nl> - return DbEnv : : version ( nullptr , nullptr , nullptr ) ; <nl> - } <nl> mmm a / src / wallet / db . h <nl> ppp b / src / wallet / db . 
h <nl> <nl> # ifndef BITCOIN_WALLET_DB_H <nl> # define BITCOIN_WALLET_DB_H <nl> <nl> - # include < clientversion . h > <nl> # include < fs . h > <nl> - # include < serialize . h > <nl> - # include < streams . h > <nl> - # include < util / system . h > <nl> <nl> - # include < atomic > <nl> - # include < map > <nl> - # include < memory > <nl> # include < string > <nl> - # include < unordered_map > <nl> - # include < vector > <nl> - <nl> - # if defined ( __GNUC__ ) & & ! defined ( __clang__ ) <nl> - # pragma GCC diagnostic push <nl> - # pragma GCC diagnostic ignored " - Wsuggest - override " <nl> - # endif <nl> - # include < db_cxx . h > <nl> - # if defined ( __GNUC__ ) & & ! defined ( __clang__ ) <nl> - # pragma GCC diagnostic pop <nl> - # endif <nl> - <nl> - struct bilingual_str ; <nl> - <nl> - static const unsigned int DEFAULT_WALLET_DBLOGSIZE = 100 ; <nl> - static const bool DEFAULT_WALLET_PRIVDB = true ; <nl> - <nl> - struct WalletDatabaseFileId { <nl> - u_int8_t value [ DB_FILE_ID_LEN ] ; <nl> - bool operator = = ( const WalletDatabaseFileId & rhs ) const ; <nl> - } ; <nl> - <nl> - class BerkeleyDatabase ; <nl> - <nl> - class BerkeleyEnvironment <nl> - { <nl> - private : <nl> - bool fDbEnvInit ; <nl> - bool fMockDb ; <nl> - / / Don ' t change into fs : : path , as that can result in <nl> - / / shutdown problems / crashes caused by a static initialized internal pointer . <nl> - std : : string strPath ; <nl> - <nl> - public : <nl> - std : : unique_ptr < DbEnv > dbenv ; <nl> - std : : map < std : : string , int > mapFileUseCount ; <nl> - std : : map < std : : string , std : : reference_wrapper < BerkeleyDatabase > > m_databases ; <nl> - std : : unordered_map < std : : string , WalletDatabaseFileId > m_fileids ; <nl> - std : : condition_variable_any m_db_in_use ; <nl> - <nl> - BerkeleyEnvironment ( const fs : : path & env_directory ) ; <nl> - BerkeleyEnvironment ( ) ; <nl> - ~ BerkeleyEnvironment ( ) ; <nl> - void Reset ( ) ; <nl> - <nl> - bool IsMock ( ) const { return fMockDb ; } <nl> - bool IsInitialized ( ) const { return fDbEnvInit ; } <nl> - bool IsDatabaseLoaded ( const std : : string & db_filename ) const { return m_databases . find ( db_filename ) ! = m_databases . end ( ) ; } <nl> - fs : : path Directory ( ) const { return strPath ; } <nl> - <nl> - bool Verify ( const std : : string & strFile ) ; <nl> - <nl> - bool Open ( bool retry ) ; <nl> - void Close ( ) ; <nl> - void Flush ( bool fShutdown ) ; <nl> - void CheckpointLSN ( const std : : string & strFile ) ; <nl> - <nl> - void CloseDb ( const std : : string & strFile ) ; <nl> - void ReloadDbEnv ( ) ; <nl> - <nl> - DbTxn * TxnBegin ( int flags = DB_TXN_WRITE_NOSYNC ) <nl> - { <nl> - DbTxn * ptxn = nullptr ; <nl> - int ret = dbenv - > txn_begin ( nullptr , & ptxn , flags ) ; <nl> - if ( ! ptxn | | ret ! = 0 ) <nl> - return nullptr ; <nl> - return ptxn ; <nl> - } <nl> - } ; <nl> - <nl> - / * * Return whether a wallet database is currently loaded . * / <nl> - bool IsWalletLoaded ( const fs : : path & wallet_path ) ; <nl> <nl> / * * Given a wallet directory path or legacy file path , return path to main data file in the wallet database . * / <nl> fs : : path WalletDataFilePath ( const fs : : path & wallet_path ) ; <nl> - <nl> - / * * Get BerkeleyEnvironment and database filename given a wallet path . * / <nl> - std : : shared_ptr < BerkeleyEnvironment > GetWalletEnv ( const fs : : path & wallet_path , std : : string & database_filename ) ; <nl> - <nl> - / * * An instance of this class represents one database . 
<nl> - * For BerkeleyDB this is just a ( env , strFile ) tuple . <nl> - * * / <nl> - class BerkeleyDatabase <nl> - { <nl> - friend class BerkeleyBatch ; <nl> - public : <nl> - / * * Create dummy DB handle * / <nl> - BerkeleyDatabase ( ) : nUpdateCounter ( 0 ) , nLastSeen ( 0 ) , nLastFlushed ( 0 ) , nLastWalletUpdate ( 0 ) , env ( nullptr ) <nl> - { <nl> - } <nl> - <nl> - / * * Create DB handle to real database * / <nl> - BerkeleyDatabase ( std : : shared_ptr < BerkeleyEnvironment > env , std : : string filename ) : <nl> - nUpdateCounter ( 0 ) , nLastSeen ( 0 ) , nLastFlushed ( 0 ) , nLastWalletUpdate ( 0 ) , env ( std : : move ( env ) ) , strFile ( std : : move ( filename ) ) <nl> - { <nl> - auto inserted = this - > env - > m_databases . emplace ( strFile , std : : ref ( * this ) ) ; <nl> - assert ( inserted . second ) ; <nl> - } <nl> - <nl> - ~ BerkeleyDatabase ( ) { <nl> - if ( env ) { <nl> - size_t erased = env - > m_databases . erase ( strFile ) ; <nl> - assert ( erased = = 1 ) ; <nl> - } <nl> - } <nl> - <nl> - / * * Return object for accessing database at specified path . * / <nl> - static std : : unique_ptr < BerkeleyDatabase > Create ( const fs : : path & path ) <nl> - { <nl> - std : : string filename ; <nl> - return MakeUnique < BerkeleyDatabase > ( GetWalletEnv ( path , filename ) , std : : move ( filename ) ) ; <nl> - } <nl> - <nl> - / * * Return object for accessing dummy database with no read / write capabilities . * / <nl> - static std : : unique_ptr < BerkeleyDatabase > CreateDummy ( ) <nl> - { <nl> - return MakeUnique < BerkeleyDatabase > ( ) ; <nl> - } <nl> - <nl> - / * * Return object for accessing temporary in - memory database . * / <nl> - static std : : unique_ptr < BerkeleyDatabase > CreateMock ( ) <nl> - { <nl> - return MakeUnique < BerkeleyDatabase > ( std : : make_shared < BerkeleyEnvironment > ( ) , " " ) ; <nl> - } <nl> - <nl> - / * * Rewrite the entire database on disk , with the exception of key pszSkip if non - zero <nl> - * / <nl> - bool Rewrite ( const char * pszSkip = nullptr ) ; <nl> - <nl> - / * * Back up the entire database to a file . <nl> - * / <nl> - bool Backup ( const std : : string & strDest ) const ; <nl> - <nl> - / * * Make sure all changes are flushed to disk . <nl> - * / <nl> - void Flush ( bool shutdown ) ; <nl> - <nl> - void IncrementUpdateCounter ( ) ; <nl> - <nl> - void ReloadDbEnv ( ) ; <nl> - <nl> - std : : atomic < unsigned int > nUpdateCounter ; <nl> - unsigned int nLastSeen ; <nl> - unsigned int nLastFlushed ; <nl> - int64_t nLastWalletUpdate ; <nl> - <nl> - / * * <nl> - * Pointer to shared database environment . <nl> - * <nl> - * Normally there is only one BerkeleyDatabase object per <nl> - * BerkeleyEnvivonment , but in the special , backwards compatible case where <nl> - * multiple wallet BDB data files are loaded from the same directory , this <nl> - * will point to a shared instance that gets freed when the last data file <nl> - * is closed . <nl> - * / <nl> - std : : shared_ptr < BerkeleyEnvironment > env ; <nl> - <nl> - / * * Database pointer . This is initialized lazily and reset during flushes , so it can be null . * / <nl> - std : : unique_ptr < Db > m_db ; <nl> - <nl> - private : <nl> - std : : string strFile ; <nl> - <nl> - / * * Return whether this database handle is a dummy for testing . <nl> - * Only to be used at a low level , application should ideally not care <nl> - * about this . 
<nl> - * / <nl> - bool IsDummy ( ) const { return env = = nullptr ; } <nl> - } ; <nl> - <nl> - / * * RAII class that provides access to a Berkeley database * / <nl> - class BerkeleyBatch <nl> - { <nl> - / * * RAII class that automatically cleanses its data on destruction * / <nl> - class SafeDbt final <nl> - { <nl> - Dbt m_dbt ; <nl> - <nl> - public : <nl> - / / construct Dbt with internally - managed data <nl> - SafeDbt ( ) ; <nl> - / / construct Dbt with provided data <nl> - SafeDbt ( void * data , size_t size ) ; <nl> - ~ SafeDbt ( ) ; <nl> - <nl> - / / delegate to Dbt <nl> - const void * get_data ( ) const ; <nl> - u_int32_t get_size ( ) const ; <nl> - <nl> - / / conversion operator to access the underlying Dbt <nl> - operator Dbt * ( ) ; <nl> - } ; <nl> - <nl> - protected : <nl> - Db * pdb ; <nl> - std : : string strFile ; <nl> - DbTxn * activeTxn ; <nl> - bool fReadOnly ; <nl> - bool fFlushOnClose ; <nl> - BerkeleyEnvironment * env ; <nl> - <nl> - public : <nl> - explicit BerkeleyBatch ( BerkeleyDatabase & database , const char * pszMode = " r + " , bool fFlushOnCloseIn = true ) ; <nl> - ~ BerkeleyBatch ( ) { Close ( ) ; } <nl> - <nl> - BerkeleyBatch ( const BerkeleyBatch & ) = delete ; <nl> - BerkeleyBatch & operator = ( const BerkeleyBatch & ) = delete ; <nl> - <nl> - void Flush ( ) ; <nl> - void Close ( ) ; <nl> - <nl> - / * flush the wallet passively ( TRY_LOCK ) <nl> - ideal to be called periodically * / <nl> - static bool PeriodicFlush ( BerkeleyDatabase & database ) ; <nl> - / * verifies the database environment * / <nl> - static bool VerifyEnvironment ( const fs : : path & file_path , bilingual_str & errorStr ) ; <nl> - / * verifies the database file * / <nl> - static bool VerifyDatabaseFile ( const fs : : path & file_path , bilingual_str & errorStr ) ; <nl> - <nl> - template < typename K , typename T > <nl> - bool Read ( const K & key , T & value ) <nl> - { <nl> - if ( ! pdb ) <nl> - return false ; <nl> - <nl> - / / Key <nl> - CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> - ssKey . reserve ( 1000 ) ; <nl> - ssKey < < key ; <nl> - SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> - <nl> - / / Read <nl> - SafeDbt datValue ; <nl> - int ret = pdb - > get ( activeTxn , datKey , datValue , 0 ) ; <nl> - bool success = false ; <nl> - if ( datValue . get_data ( ) ! = nullptr ) { <nl> - / / Unserialize value <nl> - try { <nl> - CDataStream ssValue ( ( char * ) datValue . get_data ( ) , ( char * ) datValue . get_data ( ) + datValue . get_size ( ) , SER_DISK , CLIENT_VERSION ) ; <nl> - ssValue > > value ; <nl> - success = true ; <nl> - } catch ( const std : : exception & ) { <nl> - / / In this case success remains ' false ' <nl> - } <nl> - } <nl> - return ret = = 0 & & success ; <nl> - } <nl> - <nl> - template < typename K , typename T > <nl> - bool Write ( const K & key , const T & value , bool fOverwrite = true ) <nl> - { <nl> - if ( ! pdb ) <nl> - return true ; <nl> - if ( fReadOnly ) <nl> - assert ( ! " Write called on database in read - only mode " ) ; <nl> - <nl> - / / Key <nl> - CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> - ssKey . reserve ( 1000 ) ; <nl> - ssKey < < key ; <nl> - SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> - <nl> - / / Value <nl> - CDataStream ssValue ( SER_DISK , CLIENT_VERSION ) ; <nl> - ssValue . reserve ( 10000 ) ; <nl> - ssValue < < value ; <nl> - SafeDbt datValue ( ssValue . data ( ) , ssValue . size ( ) ) ; <nl> - <nl> - / / Write <nl> - int ret = pdb - > put ( activeTxn , datKey , datValue , ( fOverwrite ? 
0 : DB_NOOVERWRITE ) ) ; <nl> - return ( ret = = 0 ) ; <nl> - } <nl> - <nl> - template < typename K > <nl> - bool Erase ( const K & key ) <nl> - { <nl> - if ( ! pdb ) <nl> - return false ; <nl> - if ( fReadOnly ) <nl> - assert ( ! " Erase called on database in read - only mode " ) ; <nl> - <nl> - / / Key <nl> - CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> - ssKey . reserve ( 1000 ) ; <nl> - ssKey < < key ; <nl> - SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> - <nl> - / / Erase <nl> - int ret = pdb - > del ( activeTxn , datKey , 0 ) ; <nl> - return ( ret = = 0 | | ret = = DB_NOTFOUND ) ; <nl> - } <nl> - <nl> - template < typename K > <nl> - bool Exists ( const K & key ) <nl> - { <nl> - if ( ! pdb ) <nl> - return false ; <nl> - <nl> - / / Key <nl> - CDataStream ssKey ( SER_DISK , CLIENT_VERSION ) ; <nl> - ssKey . reserve ( 1000 ) ; <nl> - ssKey < < key ; <nl> - SafeDbt datKey ( ssKey . data ( ) , ssKey . size ( ) ) ; <nl> - <nl> - / / Exists <nl> - int ret = pdb - > exists ( activeTxn , datKey , 0 ) ; <nl> - return ( ret = = 0 ) ; <nl> - } <nl> - <nl> - Dbc * GetCursor ( ) <nl> - { <nl> - if ( ! pdb ) <nl> - return nullptr ; <nl> - Dbc * pcursor = nullptr ; <nl> - int ret = pdb - > cursor ( nullptr , & pcursor , 0 ) ; <nl> - if ( ret ! = 0 ) <nl> - return nullptr ; <nl> - return pcursor ; <nl> - } <nl> - <nl> - int ReadAtCursor ( Dbc * pcursor , CDataStream & ssKey , CDataStream & ssValue ) <nl> - { <nl> - / / Read at cursor <nl> - SafeDbt datKey ; <nl> - SafeDbt datValue ; <nl> - int ret = pcursor - > get ( datKey , datValue , DB_NEXT ) ; <nl> - if ( ret ! = 0 ) <nl> - return ret ; <nl> - else if ( datKey . get_data ( ) = = nullptr | | datValue . get_data ( ) = = nullptr ) <nl> - return 99999 ; <nl> - <nl> - / / Convert to streams <nl> - ssKey . SetType ( SER_DISK ) ; <nl> - ssKey . clear ( ) ; <nl> - ssKey . write ( ( char * ) datKey . get_data ( ) , datKey . get_size ( ) ) ; <nl> - ssValue . SetType ( SER_DISK ) ; <nl> - ssValue . clear ( ) ; <nl> - ssValue . write ( ( char * ) datValue . get_data ( ) , datValue . get_size ( ) ) ; <nl> - return 0 ; <nl> - } <nl> - <nl> - bool TxnBegin ( ) <nl> - { <nl> - if ( ! pdb | | activeTxn ) <nl> - return false ; <nl> - DbTxn * ptxn = env - > TxnBegin ( ) ; <nl> - if ( ! ptxn ) <nl> - return false ; <nl> - activeTxn = ptxn ; <nl> - return true ; <nl> - } <nl> - <nl> - bool TxnCommit ( ) <nl> - { <nl> - if ( ! pdb | | ! activeTxn ) <nl> - return false ; <nl> - int ret = activeTxn - > commit ( 0 ) ; <nl> - activeTxn = nullptr ; <nl> - return ( ret = = 0 ) ; <nl> - } <nl> - <nl> - bool TxnAbort ( ) <nl> - { <nl> - if ( ! pdb | | ! activeTxn ) <nl> - return false ; <nl> - int ret = activeTxn - > abort ( ) ; <nl> - activeTxn = nullptr ; <nl> - return ( ret = = 0 ) ; <nl> - } <nl> - <nl> - bool static Rewrite ( BerkeleyDatabase & database , const char * pszSkip = nullptr ) ; <nl> - } ; <nl> - <nl> - std : : string BerkeleyDatabaseVersion ( ) ; <nl> + void SplitWalletPath ( const fs : : path & wallet_path , fs : : path & env_directory , std : : string & database_filename ) ; <nl> <nl> # endif / / BITCOIN_WALLET_DB_H <nl> mmm a / src / wallet / test / db_tests . cpp <nl> ppp b / src / wallet / test / db_tests . cpp <nl> <nl> <nl> # include < fs . h > <nl> # include < test / util / setup_common . h > <nl> - # include < wallet / db . h > <nl> + # include < wallet / bdb . h > <nl> <nl> <nl> BOOST_FIXTURE_TEST_SUITE ( db_tests , BasicTestingSetup ) <nl> mmm a / src / wallet / walletdb . cpp <nl> ppp b / src / wallet / walletdb . 
cpp <nl> bool WalletBatch : : TxnAbort ( ) <nl> { <nl> return m_batch . TxnAbort ( ) ; <nl> } <nl> + <nl> + bool IsWalletLoaded ( const fs : : path & wallet_path ) <nl> + { <nl> + return IsBDBWalletLoaded ( wallet_path ) ; <nl> + } <nl> mmm a / src / wallet / walletdb . h <nl> ppp b / src / wallet / walletdb . h <nl> <nl> <nl> # include < amount . h > <nl> # include < script / sign . h > <nl> + # include < wallet / bdb . h > <nl> # include < wallet / db . h > <nl> # include < wallet / walletutil . h > <nl> # include < key . h > <nl> void MaybeCompactWalletDB ( ) ; <nl> / / ! Unserialize a given Key - Value pair and load it into the wallet <nl> bool ReadKeyValue ( CWallet * pwallet , CDataStream & ssKey , CDataStream & ssValue , std : : string & strType , std : : string & strErr ) ; <nl> <nl> + / * * Return whether a wallet database is currently loaded . * / <nl> + bool IsWalletLoaded ( const fs : : path & wallet_path ) ; <nl> + <nl> # endif / / BITCOIN_WALLET_WALLETDB_H <nl>
Merge : wallet : move BDB specific classes to bdb . { cpp / h }
bitcoin/bitcoin
62d863f9157df54bfb109d68114ada8130ecd3f0
2020-06-17T09:49:42Z
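The BerkeleyBatch class being moved in the record above follows a flush-on-close RAII pattern: reads and writes go through a short-lived batch object, and its destructor calls Close(), which flushes pending activity into the underlying database. A minimal, self-contained sketch of that pattern in plain C++ follows; MiniDb and MiniBatch are hypothetical stand-ins modeled over an in-memory map, not the real BDB-backed classes or any Bitcoin Core API.

    // Hedged sketch (not Bitcoin Core code): the flush-on-close batch
    // pattern from the BerkeleyBatch interface above, modeled over an
    // in-memory map. MiniDb and MiniBatch are hypothetical stand-ins.
    #include <iostream>
    #include <map>
    #include <string>

    struct MiniDb {
        std::map<std::string, std::string> rows; // committed state
    };

    class MiniBatch {
        MiniDb& db_;
        std::map<std::string, std::string> pending_; // uncommitted writes
    public:
        explicit MiniBatch(MiniDb& db) : db_(db) {}
        ~MiniBatch() { Close(); } // flush on close, like BerkeleyBatch
        bool Write(const std::string& key, const std::string& value) {
            pending_[key] = value;
            return true;
        }
        bool Read(const std::string& key, std::string& value) const {
            auto p = pending_.find(key);
            if (p != pending_.end()) { value = p->second; return true; }
            auto c = db_.rows.find(key);
            if (c == db_.rows.end()) return false;
            value = c->second;
            return true;
        }
        void Close() {
            // Flush all pending writes into the committed state.
            for (const auto& kv : pending_) db_.rows[kv.first] = kv.second;
            pending_.clear();
        }
    };

    int main() {
        MiniDb db;
        {
            MiniBatch batch(db);
            batch.Write("version", "199900");
        } // destructor flushes the pending write into db.rows
        MiniBatch reader(db);
        std::string value;
        if (reader.Read("version", value)) std::cout << value << "\n"; // 199900
        return 0;
    }

The braces around the writer mirror the scoped lifetime of a batch object: leaving the scope is what commits the pending writes, which is why the real code can rely on the destructor rather than explicit Flush() calls at every call site.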
mmm a / Telegram / Resources / langs / lang . strings <nl> ppp b / Telegram / Resources / langs / lang . strings <nl> Copyright ( c ) 2014 - 2017 John Preston , https : / / desktop . telegram . org <nl> " lng_edited " = " edited " ; <nl> " lng_edited_date " = " Edited : { date } " ; <nl> " lng_admin_badge " = " admin " ; <nl> + " lng_fast_reply " = " Reply " ; <nl> " lng_cancel_edit_post_sure " = " Cancel editing ? " ; <nl> " lng_cancel_edit_post_yes " = " Yes " ; <nl> " lng_cancel_edit_post_no " = " No " ; <nl> mmm a / Telegram / SourceFiles / history / history_message . cpp <nl> ppp b / Telegram / SourceFiles / history / history_message . cpp <nl> QString AdminBadgeText ( ) { <nl> return lang ( lng_admin_badge ) ; <nl> } <nl> <nl> + QString FastReplyText ( ) { <nl> + return lang ( lng_fast_reply ) ; <nl> + } <nl> + <nl> style : : color FromNameFg ( not_null < PeerData * > peer , bool selected ) { <nl> if ( selected ) { <nl> const style : : color colors [ ] = { <nl> void HistoryMessage : : initDimensions ( ) { <nl> if ( via & & ! forwarded ) { <nl> namew + = st : : msgServiceFont - > spacew + via - > maxWidth ; <nl> } <nl> + const auto replyWidth = hasFastReply ( ) <nl> + ? st : : msgFont - > width ( FastReplyText ( ) ) <nl> + : 0 ; <nl> if ( _flags & MTPDmessage_ClientFlag : : f_has_admin_badge ) { <nl> - auto badgeWidth = st : : msgServiceFont - > width ( <nl> + const auto badgeWidth = st : : msgFont - > width ( <nl> AdminBadgeText ( ) ) ; <nl> - namew + = st : : msgPadding . right ( ) + badgeWidth ; <nl> + namew + = st : : msgPadding . right ( ) <nl> + + std : : max ( badgeWidth , replyWidth ) ; <nl> + } else if ( replyWidth ) { <nl> + namew + = st : : msgPadding . right ( ) + replyWidth ; <nl> } <nl> accumulate_max ( _maxw , namew ) ; <nl> } else if ( via & & ! forwarded ) { <nl> bool HistoryMessage : : hasFromName ( ) const { <nl> & & ( ! history ( ) - > peer - > isUser ( ) | | history ( ) - > peer - > isSelf ( ) ) ; <nl> } <nl> <nl> + bool HistoryMessage : : hasFastReply ( ) const { <nl> + return ! hasOutLayout ( ) <nl> + & & ( history ( ) - > peer - > isChat ( ) | | history ( ) - > peer - > isMegagroup ( ) ) ; <nl> + } <nl> + <nl> + bool HistoryMessage : : displayFastReply ( ) const { <nl> + return hasFastReply ( ) & & history ( ) - > peer - > canWrite ( ) ; <nl> + } <nl> + <nl> QRect HistoryMessage : : countGeometry ( ) const { <nl> auto maxwidth = qMin ( st : : msgMaxWidth , _maxw ) ; <nl> if ( _media & & _media - > currentWidth ( ) < maxwidth ) { <nl> QRect HistoryMessage : : countGeometry ( ) const { <nl> } <nl> <nl> void HistoryMessage : : fromNameUpdated ( int32 width ) const { <nl> + const auto replyWidth = hasFastReply ( ) <nl> + ? st : : msgFont - > width ( FastReplyText ( ) ) <nl> + : 0 ; <nl> if ( _flags & MTPDmessage_ClientFlag : : f_has_admin_badge ) { <nl> - auto badgeWidth = st : : msgServiceFont - > width ( <nl> - AdminBadgeText ( ) ) ; <nl> - width - = st : : msgPadding . right ( ) + badgeWidth ; <nl> + const auto badgeWidth = st : : msgFont - > width ( AdminBadgeText ( ) ) ; <nl> + width - = st : : msgPadding . right ( ) + std : : max ( badgeWidth , replyWidth ) ; <nl> + } else if ( replyWidth ) { <nl> + width - = st : : msgPadding . right ( ) + replyWidth ; <nl> } <nl> _fromNameVersion = displayFrom ( ) - > nameVersion ; <nl> if ( ! 
Has < HistoryMessageForwarded > ( ) ) { <nl> void HistoryMessage : : drawRightAction ( Painter & p , int left , int top , int outerWid <nl> } <nl> } <nl> <nl> - void HistoryMessage : : paintFromName ( Painter & p , QRect & trect , bool selected ) const { <nl> + void HistoryMessage : : paintFromName ( <nl> + Painter & p , <nl> + QRect & trect , <nl> + bool selected ) const { <nl> if ( displayFromName ( ) ) { <nl> - auto badgeWidth = [ & ] { <nl> + const auto badgeWidth = [ & ] { <nl> if ( _flags & MTPDmessage_ClientFlag : : f_has_admin_badge ) { <nl> - return st : : msgServiceFont - > width ( AdminBadgeText ( ) ) ; <nl> + return st : : msgFont - > width ( AdminBadgeText ( ) ) ; <nl> + } <nl> + return 0 ; <nl> + } ( ) ; <nl> + const auto replyWidth = [ & ] { <nl> + if ( App : : hoveredItem ( ) = = this & & displayFastReply ( ) ) { <nl> + return st : : msgFont - > width ( FastReplyText ( ) ) ; <nl> } <nl> return 0 ; <nl> } ( ) ; <nl> + const auto rightWidth = replyWidth ? replyWidth : badgeWidth ; <nl> auto availableLeft = trect . left ( ) ; <nl> auto availableWidth = trect . width ( ) ; <nl> - if ( badgeWidth ) { <nl> - availableWidth - = st : : msgPadding . right ( ) + badgeWidth ; <nl> + if ( rightWidth ) { <nl> + availableWidth - = st : : msgPadding . right ( ) + rightWidth ; <nl> } <nl> <nl> p . setFont ( st : : msgNameFont ) ; <nl> void HistoryMessage : : paintFromName ( Painter & p , QRect & trect , bool selected ) cons <nl> availableLeft + = skipWidth ; <nl> availableWidth - = skipWidth ; <nl> } <nl> - if ( badgeWidth ) { <nl> + if ( rightWidth ) { <nl> p . setPen ( selected ? st : : msgInDateFgSelected : st : : msgInDateFg ) ; <nl> - p . setFont ( st : : msgFont ) ; <nl> + p . setFont ( ClickHandler : : showAsActive ( _fastReplyLink ) <nl> + ? st : : msgFont - > underline ( ) <nl> + : st : : msgFont ) ; <nl> p . drawText ( <nl> - trect . left ( ) + trect . width ( ) - badgeWidth , <nl> + trect . left ( ) + trect . width ( ) - rightWidth , <nl> trect . top ( ) + st : : msgFont - > ascent , <nl> - AdminBadgeText ( ) ) ; <nl> + replyWidth ? FastReplyText ( ) : AdminBadgeText ( ) ) ; <nl> } <nl> trect . setY ( trect . y ( ) + st : : msgNameFont - > height ) ; <nl> } <nl> ClickHandlerPtr HistoryMessage : : rightActionLink ( ) const { <nl> return _rightActionLink ; <nl> } <nl> <nl> + ClickHandlerPtr HistoryMessage : : fastReplyLink ( ) const { <nl> + if ( ! _fastReplyLink ) { <nl> + const auto itemId = fullId ( ) ; <nl> + _fastReplyLink = std : : make_shared < LambdaClickHandler > ( [ = ] { <nl> + if ( const auto item = App : : histItemById ( itemId ) ) { <nl> + if ( const auto main = App : : main ( ) ) { <nl> + main - > replyToItem ( item ) ; <nl> + } <nl> + } <nl> + } ) ; <nl> + } <nl> + return _fastReplyLink ; <nl> + } <nl> + <nl> / / Forward to _media . <nl> void HistoryMessage : : updatePressed ( QPoint point ) { <nl> if ( ! _media ) return ; <nl> bool HistoryMessage : : getStateFromName ( <nl> QRect & trect , <nl> not_null < HistoryTextState * > outResult ) const { <nl> if ( displayFromName ( ) ) { <nl> + const auto replyWidth = [ & ] { <nl> + if ( App : : hoveredItem ( ) = = this & & displayFastReply ( ) ) { <nl> + return st : : msgFont - > width ( FastReplyText ( ) ) ; <nl> + } <nl> + return 0 ; <nl> + } ( ) ; <nl> + if ( replyWidth <nl> + & & point . x ( ) > = trect . left ( ) + trect . width ( ) - replyWidth <nl> + & & point . x ( ) < trect . left ( ) + trect . width ( ) + st : : msgPadding . right ( ) <nl> + & & point . y ( ) > = trect . top ( ) - st : : msgPadding . 
top ( ) <nl> + & & point . y ( ) < trect . top ( ) + st : : msgServiceFont - > height ) { <nl> + outResult - > link = fastReplyLink ( ) ; <nl> + return true ; <nl> + } <nl> if ( point . y ( ) > = trect . top ( ) & & point . y ( ) < trect . top ( ) + st : : msgNameFont - > height ) { <nl> + auto availableLeft = trect . left ( ) ; <nl> + auto availableWidth = trect . width ( ) ; <nl> + if ( replyWidth ) { <nl> + availableWidth - = st : : msgPadding . right ( ) + replyWidth ; <nl> + } <nl> auto user = displayFrom ( ) ; <nl> - if ( point . x ( ) > = trect . left ( ) & & point . x ( ) < trect . left ( ) + trect . width ( ) & & point . x ( ) < trect . left ( ) + user - > nameText . maxWidth ( ) ) { <nl> + if ( point . x ( ) > = availableLeft <nl> + & & point . x ( ) < availableLeft + availableWidth <nl> + & & point . x ( ) < availableLeft + user - > nameText . maxWidth ( ) ) { <nl> outResult - > link = user - > openLink ( ) ; <nl> return true ; <nl> } <nl> auto forwarded = Get < HistoryMessageForwarded > ( ) ; <nl> auto via = Get < HistoryMessageVia > ( ) ; <nl> - if ( via & & ! forwarded & & point . x ( ) > = trect . left ( ) + author ( ) - > nameText . maxWidth ( ) + st : : msgServiceFont - > spacew & & point . x ( ) < trect . left ( ) + user - > nameText . maxWidth ( ) + st : : msgServiceFont - > spacew + via - > width ) { <nl> + if ( via <nl> + & & ! forwarded <nl> + & & point . x ( ) > = availableLeft + author ( ) - > nameText . maxWidth ( ) + st : : msgServiceFont - > spacew <nl> + & & point . x ( ) < availableLeft + availableWidth <nl> + & & point . x ( ) < availableLeft + user - > nameText . maxWidth ( ) + st : : msgServiceFont - > spacew + via - > width ) { <nl> outResult - > link = via - > link ; <nl> return true ; <nl> } <nl> mmm a / Telegram / SourceFiles / history / history_message . h <nl> ppp b / Telegram / SourceFiles / history / history_message . h <nl> class HistoryMessage <nl> if ( isAttachedToPrevious ( ) ) return false ; <nl> return true ; <nl> } <nl> + bool hasFastReply ( ) const ; <nl> + bool displayFastReply ( ) const ; <nl> bool displayForwardedFrom ( ) const ; <nl> bool uploading ( ) const ; <nl> bool displayRightAction ( ) const override ; <nl> class HistoryMessage <nl> bool displayFastShare ( ) const ; <nl> bool displayGoToOriginal ( ) const ; <nl> <nl> - QString _timeText ; <nl> - int _timeWidth = 0 ; <nl> - <nl> - mutable ClickHandlerPtr _rightActionLink ; <nl> - mutable int32 _fromNameVersion = 0 ; <nl> - <nl> struct CreateConfig ; <nl> void createComponentsHelper ( MTPDmessage : : Flags flags , MsgId replyTo , UserId viaBotId , const QString & postAuthor , const MTPReplyMarkup & markup ) ; <nl> void createComponents ( const CreateConfig & config ) ; <nl> <nl> void updateMediaInBubbleState ( ) ; <nl> void updateAdminBadgeState ( ) ; <nl> + ClickHandlerPtr fastReplyLink ( ) const ; <nl> + <nl> + QString _timeText ; <nl> + int _timeWidth = 0 ; <nl> + <nl> + mutable ClickHandlerPtr _rightActionLink ; <nl> + mutable ClickHandlerPtr _fastReplyLink ; <nl> + mutable int32 _fromNameVersion = 0 ; <nl> <nl> } ; <nl> mmm a / Telegram / SourceFiles / mainwidget . cpp <nl> ppp b / Telegram / SourceFiles / mainwidget . 
cpp <nl> bool MainWidget : : shareUrl ( <nl> return true ; <nl> } <nl> <nl> + void MainWidget : : replyToItem ( not_null < HistoryItem * > item ) { <nl> + if ( _history - > peer ( ) = = item - > history ( ) - > peer <nl> + | | _history - > peer ( ) = = item - > history ( ) - > peer - > migrateTo ( ) ) { <nl> + App : : contextItem ( item ) ; <nl> + _history - > onReplyToMessage ( ) ; <nl> + } <nl> + } <nl> + <nl> bool MainWidget : : onInlineSwitchChosen ( const PeerId & peer , const QString & botAndQuery ) { <nl> PeerData * p = App : : peer ( peer ) ; <nl> if ( ! peer | | ! p - > canWrite ( ) ) { <nl> mmm a / Telegram / SourceFiles / mainwidget . h <nl> ppp b / Telegram / SourceFiles / mainwidget . h <nl> class MainWidget : public Ui : : RpWidget , public RPCSender , private base : : Subscrib <nl> not_null < PeerData * > peer , <nl> const QString & url , <nl> const QString & text ) ; <nl> + void replyToItem ( not_null < HistoryItem * > item ) ; <nl> bool onInlineSwitchChosen ( const PeerId & peer , const QString & botAndQuery ) ; <nl> bool onSendPaths ( const PeerId & peer ) ; <nl> void onFilesOrForwardDrop ( const PeerId & peer , const QMimeData * data ) ; <nl>
Add fast reply button in groups .
telegramdesktop/tdesktop
ae7e5be5cdcde1b09160c8beb98727251271186a
2017-12-30T18:54:15Z
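fastReplyLink() in the record above lazily builds and caches a click handler that captures the message's id rather than a pointer to the message, re-resolving the item through a registry at click time so a stale object is never dereferenced. A minimal sketch of that pattern, assuming simplified stand-ins for tdesktop's machinery (ClickHandler, g_items, and Message here are hypothetical, not tdesktop types):

    // Hedged sketch (not tdesktop code): a lazily created, cached click
    // handler that captures an id and re-resolves it at click time.
    #include <functional>
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    using ClickHandler = std::function<void()>;
    using ClickHandlerPtr = std::shared_ptr<ClickHandler>;

    // Hypothetical registry standing in for the item lookup in the diff.
    std::map<int, std::string> g_items = {{7, "message #7"}};

    class Message {
        int id_;
        mutable ClickHandlerPtr fast_reply_link_; // built once, then cached
    public:
        explicit Message(int id) : id_(id) {}
        ClickHandlerPtr fastReplyLink() const {
            if (!fast_reply_link_) {
                const int itemId = id_; // capture the id, not `this`, so the
                                        // handler stays safe if the message
                                        // is destroyed before the click
                fast_reply_link_ = std::make_shared<ClickHandler>([itemId] {
                    auto it = g_items.find(itemId);
                    if (it != g_items.end()) {
                        std::cout << "reply to " << it->second << "\n";
                    }
                });
            }
            return fast_reply_link_;
        }
    };

    int main() {
        Message msg(7);
        (*msg.fastReplyLink())(); // prints: reply to message #7
        return 0;
    }

Capturing the id and looking the item up again on each click is the design choice that makes the cached handler safe to keep in a mutable member.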
mmm a / api / envoy / config / listener / v3 / listener . proto <nl> ppp b / api / envoy / config / listener / v3 / listener . proto <nl> message ListenerCollection { <nl> repeated udpa . core . v1 . CollectionEntry entries = 1 ; <nl> } <nl> <nl> - / / [ # next - free - field : 25 ] <nl> + / / [ # next - free - field : 26 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Listener " ; <nl> <nl> message Listener { <nl> / / : ref : ` FAQ entry < faq_how_to_setup_sni > ` . <nl> repeated FilterChain filter_chains = 3 ; <nl> <nl> + / / The default filter chain if none of the filter chains matches . If no default filter chain is supplied , <nl> + / / the connection will be closed . The filter chain match is ignored in this field . <nl> + FilterChain default_filter_chain = 25 ; <nl> + <nl> / / Soft limit on size of the listener ’ s new connection read and write buffers . <nl> / / If unspecified , an implementation defined default is applied ( 1MiB ) . <nl> google . protobuf . UInt32Value per_connection_buffer_limit_bytes = 5 <nl> mmm a / api / envoy / config / listener / v3 / listener_components . proto <nl> ppp b / api / envoy / config / listener / v3 / listener_components . proto <nl> message Filter { <nl> / / ` ` www . example . com ` ` , then ` ` * . example . com ` ` , then ` ` * . com ` ` , then any filter <nl> / / chain without ` ` server_names ` ` requirements ) . <nl> / / <nl> + / / A different way to reason about the filter chain matches : <nl> + / / Suppose there exist N filter chains . Prune the filter chain set using the above 8 steps . <nl> + / / In each step , filter chains which most specifically match the attributes continue to the next step . <nl> + / / The listener guarantees at most 1 filter chain is left after all of the steps . <nl> + / / <nl> + / / Example : <nl> + / / <nl> + / / For destination port , filter chains specifying the destination port of incoming traffic are the <nl> + / / most specific match . If none of the filter chains specifies the exact destination port , the filter <nl> + / / chains which do not specify ports are the most specific match . Filter chains specifying the <nl> + / / wrong port can never be the most specific match . <nl> + / / <nl> / / [ # comment : Implemented rules are kept in the preference order , with deprecated fields <nl> / / listed at the end , because that ' s how we want to list them in the docs . <nl> / / <nl> mmm a / api / envoy / config / listener / v4alpha / listener . proto <nl> ppp b / api / envoy / config / listener / v4alpha / listener . proto <nl> message ListenerCollection { <nl> repeated udpa . core . v1 . CollectionEntry entries = 1 ; <nl> } <nl> <nl> - / / [ # next - free - field : 25 ] <nl> + / / [ # next - free - field : 26 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . listener . v3 . Listener " ; <nl> <nl> message Listener { <nl> / / : ref : ` FAQ entry < faq_how_to_setup_sni > ` . <nl> repeated FilterChain filter_chains = 3 ; <nl> <nl> + / / The default filter chain if none of the filter chains matches . If no default filter chain is supplied , <nl> + / / the connection will be closed . The filter chain match is ignored in this field . <nl> + FilterChain default_filter_chain = 25 ; <nl> + <nl> / / Soft limit on size of the listener ’ s new connection read and write buffers . <nl> / / If unspecified , an implementation defined default is applied ( 1MiB ) . <nl> google .
protobuf . UInt32Value per_connection_buffer_limit_bytes = 5 <nl> mmm a / api / envoy / config / listener / v4alpha / listener_components . proto <nl> ppp b / api / envoy / config / listener / v4alpha / listener_components . proto <nl> message Filter { <nl> / / ` ` www . example . com ` ` , then ` ` * . example . com ` ` , then ` ` * . com ` ` , then any filter <nl> / / chain without ` ` server_names ` ` requirements ) . <nl> / / <nl> + / / A different way to reason about the filter chain matches : <nl> + / / Suppose there exist N filter chains . Prune the filter chain set using the above 8 steps . <nl> + / / In each step , filter chains which most specifically match the attributes continue to the next step . <nl> + / / The listener guarantees at most 1 filter chain is left after all of the steps . <nl> + / / <nl> + / / Example : <nl> + / / <nl> + / / For destination port , filter chains specifying the destination port of incoming traffic are the <nl> + / / most specific match . If none of the filter chains specifies the exact destination port , the filter <nl> + / / chains which do not specify ports are the most specific match . Filter chains specifying the <nl> + / / wrong port can never be the most specific match . <nl> + / / <nl> / / [ # comment : Implemented rules are kept in the preference order , with deprecated fields <nl> / / listed at the end , because that ' s how we want to list them in the docs . <nl> / / <nl> mmm a / docs / root / configuration / listeners / lds . rst <nl> ppp b / docs / root / configuration / listeners / lds . rst <nl> The semantics of listener updates are as follows : <nl> * Listeners are effectively constant once created . Thus , when a listener is updated , an entirely <nl> new listener is created ( with the same listen socket ) . This listener goes through the same <nl> warming process described above for a newly added listener . <nl> - * When a listener is updated or removed , the old listener will be placed into a " draining " state <nl> + * When a listener is removed , the old listener will be placed into a " draining " state <nl> much like when the entire server is drained for restart . Connections owned by the listener will <nl> be gracefully closed ( if possible ) for some period of time before the listener is removed and any <nl> remaining connections are closed . The drain time is set via the : option : ` - - drain - time - s ` option . <nl> + * When a TCP listener is updated , if the new listener contains a subset of filter chains in the old listener , <nl> + the connections owned by these overlapping filter chains remain open . Only the connections owned by the <nl> + removed filter chains will be drained following the above pattern . Note that if any global listener attributes are <nl> + changed , the entire listener ( and all filter chains ) is drained similarly to removal above . See <nl> + : ref : ` filter chain only update < filter_chain_only_update > ` for detailed rules to reason about the impacted filter chains . <nl> <nl> . . note : : <nl> <nl> mmm a / docs / root / intro / arch_overview / listeners / listeners_toc . rst <nl> ppp b / docs / root / intro / arch_overview / listeners / listeners_toc . rst <nl> Listeners <nl> <nl> listeners <nl> listener_filters <nl> + network_filter_chain <nl> network_filters <nl> tcp_proxy <nl> udp_proxy <nl> new file mode 100644 <nl> index 00000000000 . . d920deaf303 <nl> mmm / dev / null <nl> ppp b / docs / root / intro / arch_overview / listeners / network_filter_chain . rst <nl> <nl> + . .
_arch_overview_network_filter_chain : <nl> + <nl> + Network Filter Chain <nl> + = = = = = = = = = = = = = = = = = = = = <nl> + <nl> + As discussed in the : ref : ` listener < arch_overview_listeners > ` section , network level ( L3 / L4 ) filters <nl> + form the core of Envoy connection handling . <nl> + <nl> + The network filters are chained in an ordered list known as a : ref : ` filter chain < envoy_v3_api_msg_config . listener . v3 . FilterChain > ` . <nl> + Each listener has multiple filter chains and an optional : ref : ` default filter chain < envoy_v3_api_field_config . listener . v3 . Listener . default_filter_chain > ` . <nl> + If the best match filter chain cannot be found , the default filter chain will be <nl> + chosen to serve the request . If the default filter chain is not supplied , the connection will be closed . <nl> + <nl> + . . _filter_chain_only_update : <nl> + <nl> + Filter chain only update <nl> + mmmmmmmmmmmmmmmmmmmmmmmm <nl> + <nl> + : ref : ` Filter chains < envoy_v3_api_msg_config . listener . v3 . FilterChain > ` can be updated independently . Upon listener config <nl> + update , if the listener manager determines that the listener update is a filter chain only update , the listener update <nl> + will be executed by adding , updating and removing filter chains . The connections owned by the removed filter chains will <nl> + be drained as described in listener drain . <nl> + <nl> + If the new : ref : ` filter chain < envoy_v3_api_msg_config . listener . v3 . FilterChain > ` and the old : ref : ` filter chain < envoy_v3_api_msg_config . listener . v3 . FilterChain > ` <nl> + are protobuf message equivalent , the corresponding filter chain runtime info survives . The connections owned by the <nl> + surviving filter chains remain open . <nl> + <nl> + Not all listener config updates can be executed by a filter chain update . For example , if the listener metadata is <nl> + updated within the new listener config , the new metadata must be picked up by the new filter chains . In this case , the <nl> + entire listener is drained and updated . <nl> mmm a / docs / root / version_history / current . rst <nl> ppp b / docs / root / version_history / current . rst <nl> New Features <nl> * grpc : implemented header value syntax support when defining : ref : ` initial metadata < envoy_v3_api_field_config . core . v3 . GrpcService . initial_metadata > ` for gRPC - based ` ext_authz ` : ref : ` HTTP < envoy_v3_api_field_extensions . filters . http . ext_authz . v3 . ExtAuthz . grpc_service > ` and : ref : ` network < envoy_v3_api_field_extensions . filters . network . ext_authz . v3 . ExtAuthz . grpc_service > ` filters , and : ref : ` ratelimit < envoy_v3_api_field_config . ratelimit . v3 . RateLimitServiceConfig . grpc_service > ` filters . <nl> * hds : added support for delta updates in the : ref : ` HealthCheckSpecifier < envoy_v3_api_msg_service . health . v3 . HealthCheckSpecifier > ` , making only the Endpoints and Health Checkers that changed be reconstructed on receiving a new message , rather than the entire HDS . <nl> * health_check : added option to use : ref : ` no_traffic_healthy_interval < envoy_v3_api_field_config . core . v3 . HealthCheck . no_traffic_healthy_interval > ` which allows a different no traffic interval when the host is healthy . <nl> + * listener : added an optional : ref : ` default filter chain < envoy_v3_api_field_config . listener . v3 . Listener . default_filter_chain > ` .
If this field is supplied , and none of the : ref : ` filter_chains < envoy_v3_api_field_config . listener . v3 . Listener . filter_chains > ` matches , this default filter chain is used to serve the connection . <nl> * mongo_proxy : the list of commands to produce metrics for is now : ref : ` configurable < envoy_v3_api_field_extensions . filters . network . mongo_proxy . v3 . MongoProxy . commands > ` . <nl> * ratelimit : added support for use of various : ref : ` metadata < envoy_v3_api_field_config . route . v3 . RateLimit . Action . metadata > ` as a ratelimit action . <nl> * ratelimit : added : ref : ` disable_x_envoy_ratelimited_header < envoy_v3_api_msg_extensions . filters . http . ratelimit . v3 . RateLimit > ` option to disable ` X - Envoy - RateLimited ` header . <nl> mmm a / docs / root / version_history / v1 . 16 . 0 . rst <nl> ppp b / docs / root / version_history / v1 . 16 . 0 . rst <nl> Bug Fixes <nl> * http : fixed bugs in datadog and squash filter ' s handling of responses with no bodies . <nl> * http : made the HeaderValues : : prefix ( ) method const . <nl> * jwt_authn : supports jwt payload without " iss " field . <nl> - * listener : fixed crash at listener inplace update when connetion load balancer is set . <nl> + * listener : fixed crash at listener inplace update when connection load balancer is set . <nl> * rocketmq_proxy : fixed an issue involving incorrect header lengths . In debug mode it causes crash and in release mode it causes underflow . <nl> * thrift_proxy : fixed crashing bug on request overflow . <nl> * udp_proxy : fixed a crash due to UDP packets being processed after listener removal . <nl> mmm a / generated_api_shadow / envoy / config / listener / v3 / listener . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v3 / listener . proto <nl> message ListenerCollection { <nl> repeated udpa . core . v1 . CollectionEntry entries = 1 ; <nl> } <nl> <nl> - / / [ # next - free - field : 25 ] <nl> + / / [ # next - free - field : 26 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . api . v2 . Listener " ; <nl> <nl> message Listener { <nl> / / : ref : ` FAQ entry < faq_how_to_setup_sni > ` . <nl> repeated FilterChain filter_chains = 3 ; <nl> <nl> + / / The default filter chain if none of the filter chains matches . If no default filter chain is supplied , <nl> + / / the connection will be closed . The filter chain match is ignored in this field . <nl> + FilterChain default_filter_chain = 25 ; <nl> + <nl> / / Soft limit on size of the listener ’ s new connection read and write buffers . <nl> / / If unspecified , an implementation defined default is applied ( 1MiB ) . <nl> google . protobuf . UInt32Value per_connection_buffer_limit_bytes = 5 <nl> mmm a / generated_api_shadow / envoy / config / listener / v3 / listener_components . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v3 / listener_components . proto <nl> message Filter { <nl> / / ` ` www . example . com ` ` , then ` ` * . example . com ` ` , then ` ` * . com ` ` , then any filter <nl> / / chain without ` ` server_names ` ` requirements ) . <nl> / / <nl> + / / A different way to reason about the filter chain matches : <nl> + / / Suppose there exist N filter chains . Prune the filter chain set using the above 8 steps . <nl> + / / In each step , filter chains which most specifically match the attributes continue to the next step .
<nl> + / / The listener guarantees at most 1 filter chain is left after all of the steps . <nl> + / / <nl> + / / Example : <nl> + / / <nl> + / / For destination port , filter chains specifying the destination port of incoming traffic are the <nl> + / / most specific match . If none of the filter chains specifies the exact destination port , the filter <nl> + / / chains which do not specify ports are the most specific match . Filter chains specifying the <nl> + / / wrong port can never be the most specific match . <nl> + / / <nl> / / [ # comment : Implemented rules are kept in the preference order , with deprecated fields <nl> / / listed at the end , because that ' s how we want to list them in the docs . <nl> / / <nl> mmm a / generated_api_shadow / envoy / config / listener / v4alpha / listener . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v4alpha / listener . proto <nl> message ListenerCollection { <nl> repeated udpa . core . v1 . CollectionEntry entries = 1 ; <nl> } <nl> <nl> - / / [ # next - free - field : 25 ] <nl> + / / [ # next - free - field : 26 ] <nl> message Listener { <nl> option ( udpa . annotations . versioning ) . previous_message_type = " envoy . config . listener . v3 . Listener " ; <nl> <nl> message Listener { <nl> / / : ref : ` FAQ entry < faq_how_to_setup_sni > ` . <nl> repeated FilterChain filter_chains = 3 ; <nl> <nl> + / / The default filter chain if none of the filter chains matches . If no default filter chain is supplied , <nl> + / / the connection will be closed . The filter chain match is ignored in this field . <nl> + FilterChain default_filter_chain = 25 ; <nl> + <nl> / / Soft limit on size of the listener ’ s new connection read and write buffers . <nl> / / If unspecified , an implementation defined default is applied ( 1MiB ) . <nl> google . protobuf . UInt32Value per_connection_buffer_limit_bytes = 5 <nl> mmm a / generated_api_shadow / envoy / config / listener / v4alpha / listener_components . proto <nl> ppp b / generated_api_shadow / envoy / config / listener / v4alpha / listener_components . proto <nl> message Filter { <nl> / / ` ` www . example . com ` ` , then ` ` * . example . com ` ` , then ` ` * . com ` ` , then any filter <nl> / / chain without ` ` server_names ` ` requirements ) . <nl> / / <nl> + / / A different way to reason about the filter chain matches : <nl> + / / Suppose there exist N filter chains . Prune the filter chain set using the above 8 steps . <nl> + / / In each step , filter chains which most specifically match the attributes continue to the next step . <nl> + / / The listener guarantees at most 1 filter chain is left after all of the steps . <nl> + / / <nl> + / / Example : <nl> + / / <nl> + / / For destination port , filter chains specifying the destination port of incoming traffic are the <nl> + / / most specific match . If none of the filter chains specifies the exact destination port , the filter <nl> + / / chains which do not specify ports are the most specific match . Filter chains specifying the <nl> + / / wrong port can never be the most specific match . <nl> + / / <nl> / / [ # comment : Implemented rules are kept in the preference order , with deprecated fields <nl> / / listed at the end , because that ' s how we want to list them in the docs . <nl> / / <nl> mmm a / source / extensions / filters / listener / tls_inspector / tls_inspector . cc <nl> ppp b / source / extensions / filters / listener / tls_inspector / tls_inspector .
cc <nl> <nl> <nl> # include " extensions / transport_sockets / well_known_names . h " <nl> <nl> + # include " absl / strings / str_join . h " <nl> # include " openssl / ssl . h " <nl> <nl> namespace Envoy { <nl> void Filter : : onALPN ( const unsigned char * data , unsigned int len ) { <nl> } <nl> protocols . emplace_back ( reinterpret_cast < const char * > ( CBS_data ( & name ) ) , CBS_len ( & name ) ) ; <nl> } <nl> + ENVOY_LOG ( trace , " tls : onALPN ( ) , ALPN : { } " , absl : : StrJoin ( protocols , " , " ) ) ; <nl> cb_ - > socket ( ) . setRequestedApplicationProtocols ( protocols ) ; <nl> alpn_found_ = true ; <nl> } <nl> mmm a / source / server / filter_chain_manager_impl . cc <nl> ppp b / source / server / filter_chain_manager_impl . cc <nl> bool FilterChainManagerImpl : : isWildcardServerName ( const std : : string & name ) { <nl> return absl : : StartsWith ( name , " * . " ) ; <nl> } <nl> <nl> - void FilterChainManagerImpl : : addFilterChain ( <nl> + void FilterChainManagerImpl : : addFilterChains ( <nl> absl : : Span < const envoy : : config : : listener : : v3 : : FilterChain * const > filter_chain_span , <nl> + const envoy : : config : : listener : : v3 : : FilterChain * default_filter_chain , <nl> FilterChainFactoryBuilder & filter_chain_factory_builder , <nl> FilterChainFactoryContextCreator & context_creator ) { <nl> Cleanup cleanup ( [ this ] ( ) { origin_ = absl : : nullopt ; } ) ; <nl> void FilterChainManagerImpl : : addFilterChain ( <nl> <nl> / / Reject partial wildcards , we don ' t match on them . <nl> for ( const auto & server_name : filter_chain_match . server_names ( ) ) { <nl> - if ( server_name . find ( ' * ' ) ! = std : : string : : npos & & <nl> - ! FilterChainManagerImpl : : isWildcardServerName ( server_name ) ) { <nl> + if ( server_name . find ( ' * ' ) ! = std : : string : : npos & & ! isWildcardServerName ( server_name ) ) { <nl> throw EnvoyException ( <nl> fmt : : format ( " error adding listener ' { } ' : partial wildcards are not supported in " <nl> " \ " server_names \ " " , <nl> void FilterChainManagerImpl : : addFilterChain ( <nl> filter_chain_match . server_names ( ) , filter_chain_match . transport_protocol ( ) , <nl> filter_chain_match . application_protocols ( ) , filter_chain_match . source_type ( ) , source_ips , <nl> filter_chain_match . source_ports ( ) , filter_chain_impl ) ; <nl> + <nl> fc_contexts_ [ * filter_chain ] = filter_chain_impl ; <nl> } <nl> convertIPsToTries ( ) ; <nl> + copyOrRebuildDefaultFilterChain ( default_filter_chain , filter_chain_factory_builder , <nl> + context_creator ) ; <nl> ENVOY_LOG ( debug , " new fc_contexts has { } filter chains , including { } newly built " , <nl> fc_contexts_ . size ( ) , new_filter_chain_size ) ; <nl> } <nl> <nl> + void FilterChainManagerImpl : : copyOrRebuildDefaultFilterChain ( <nl> + const envoy : : config : : listener : : v3 : : FilterChain * default_filter_chain , <nl> + FilterChainFactoryBuilder & filter_chain_factory_builder , <nl> + FilterChainFactoryContextCreator & context_creator ) { <nl> + / / Default filter chain is built exactly once . <nl> + ASSERT ( ! default_filter_chain_message_ . has_value ( ) ) ; <nl> + <nl> + / / Save the default filter chain message . This message could be used in next listener update . <nl> + if ( default_filter_chain = = nullptr ) { <nl> + return ; <nl> + } <nl> + default_filter_chain_message_ = absl : : make_optional ( * default_filter_chain ) ; <nl> + <nl> + / / Origin filter chain manager could be empty if the current is the ancestor . 
<nl> + const auto * origin = getOriginFilterChainManager ( ) ; <nl> + if ( origin = = nullptr ) { <nl> + default_filter_chain_ = <nl> + filter_chain_factory_builder . buildFilterChain ( * default_filter_chain , context_creator ) ; <nl> + return ; <nl> + } <nl> + <nl> + / / Copy from original filter chain manager , or build new filter chain if the default filter chain <nl> + / / is not equivalent to the one in the original filter chain manager . <nl> + MessageUtil eq ; <nl> + if ( origin - > default_filter_chain_message_ . has_value ( ) & & <nl> + eq ( origin - > default_filter_chain_message_ . value ( ) , * default_filter_chain ) ) { <nl> + default_filter_chain_ = origin - > default_filter_chain_ ; <nl> + } else { <nl> + default_filter_chain_ = <nl> + filter_chain_factory_builder . buildFilterChain ( * default_filter_chain , context_creator ) ; <nl> + } <nl> + } <nl> + <nl> void FilterChainManagerImpl : : addFilterChainForDestinationPorts ( <nl> DestinationPortsMap & destination_ports_map , uint16_t destination_port , <nl> const std : : vector < std : : string > & destination_ips , <nl> const Network : : FilterChain * <nl> FilterChainManagerImpl : : findFilterChain ( const Network : : ConnectionSocket & socket ) const { <nl> const auto & address = socket . localAddress ( ) ; <nl> <nl> + const Network : : FilterChain * best_match_filter_chain = nullptr ; <nl> / / Match on destination port ( only for IP addresses ) . <nl> if ( address - > type ( ) = = Network : : Address : : Type : : Ip ) { <nl> const auto port_match = destination_ports_map_ . find ( address - > ip ( ) - > port ( ) ) ; <nl> if ( port_match ! = destination_ports_map_ . end ( ) ) { <nl> - return findFilterChainForDestinationIP ( * port_match - > second . second , socket ) ; <nl> + best_match_filter_chain = findFilterChainForDestinationIP ( * port_match - > second . second , socket ) ; <nl> + if ( best_match_filter_chain ! = nullptr ) { <nl> + return best_match_filter_chain ; <nl> + } else { <nl> + / / There is an entry for the specific port but none of the filter chains matches . Instead of <nl> + / / matching catch - all port 0 , the fallback filter chain is returned . <nl> + return default_filter_chain_ . get ( ) ; <nl> + } <nl> } <nl> } <nl> - <nl> - / / Match on catch - all port 0 . <nl> + / / Match on catch - all port 0 if there is no specific port subtree . <nl> const auto port_match = destination_ports_map_ . find ( 0 ) ; <nl> if ( port_match ! = destination_ports_map_ . end ( ) ) { <nl> - return findFilterChainForDestinationIP ( * port_match - > second . second , socket ) ; <nl> + best_match_filter_chain = findFilterChainForDestinationIP ( * port_match - > second . second , socket ) ; <nl> } <nl> - <nl> - return nullptr ; <nl> + return best_match_filter_chain ! = nullptr <nl> + ? best_match_filter_chain <nl> + / / Neither exact port nor catch - all port matches . Use fallback filter chain . <nl> + : default_filter_chain_ . get ( ) ; <nl> } <nl> <nl> const Network : : FilterChain * FilterChainManagerImpl : : findFilterChainForDestinationIP ( <nl> mmm a / source / server / filter_chain_manager_impl . h <nl> ppp b / source / server / filter_chain_manager_impl . h <nl> class FilterChainManagerImpl : public Network : : FilterChainManager , <nl> <nl> / / Add all filter chains into this manager . During the lifetime of FilterChainManagerImpl this <nl> / / should be called at most once .
<nl> - void addFilterChain ( <nl> + void addFilterChains ( <nl> absl : : Span < const envoy : : config : : listener : : v3 : : FilterChain * const > filter_chain_span , <nl> - FilterChainFactoryBuilder & b , FilterChainFactoryContextCreator & context_creator ) ; <nl> + const envoy : : config : : listener : : v3 : : FilterChain * default_filter_chain , <nl> + FilterChainFactoryBuilder & filter_chain_factory_builder , <nl> + FilterChainFactoryContextCreator & context_creator ) ; <nl> + <nl> static bool isWildcardServerName ( const std : : string & name ) ; <nl> <nl> / / Return the current view of filter chains , keyed by filter chain message . Used by the owning <nl> / / listener to calculate the intersection of filter chains with another listener . <nl> const FcContextMap & filterChainsByMessage ( ) const { return fc_contexts_ ; } <nl> + const absl : : optional < envoy : : config : : listener : : v3 : : FilterChain > & <nl> + defaultFilterChainMessage ( ) const { <nl> + return default_filter_chain_message_ ; <nl> + } <nl> + const Network : : DrainableFilterChainSharedPtr & defaultFilterChain ( ) const { <nl> + return default_filter_chain_ ; <nl> + } <nl> <nl> private : <nl> void convertIPsToTries ( ) ; <nl> + <nl> + / / Build default filter chain from filter chain message . Skip the build but copy from original <nl> + / / filter chain manager if the default filter chain message duplicates the message in origin <nl> + / / filter chain manager . Called by addFilterChains ( ) . <nl> + void copyOrRebuildDefaultFilterChain ( <nl> + const envoy : : config : : listener : : v3 : : FilterChain * default_filter_chain , <nl> + FilterChainFactoryBuilder & filter_chain_factory_builder , <nl> + FilterChainFactoryContextCreator & context_creator ) ; <nl> + <nl> using SourcePortsMap = absl : : flat_hash_map < uint16_t , Network : : FilterChainSharedPtr > ; <nl> using SourcePortsMapSharedPtr = std : : shared_ptr < SourcePortsMap > ; <nl> using SourceIPsMap = absl : : flat_hash_map < std : : string , SourcePortsMapSharedPtr > ; <nl> class FilterChainManagerImpl : public Network : : FilterChainManager , <nl> / / detect the filter chains in the intersection of existing listener and new listener . <nl> FcContextMap fc_contexts_ ; <nl> <nl> + absl : : optional < envoy : : config : : listener : : v3 : : FilterChain > default_filter_chain_message_ ; <nl> + / / The optional fallback filter chain if destination_ports_map_ does not find a matched filter <nl> + / / chain . <nl> + Network : : DrainableFilterChainSharedPtr default_filter_chain_ ; <nl> + <nl> / / Mapping of FilterChain ' s configured destination ports , IPs , server names , transport protocols <nl> / / and application protocols , using structures defined above . <nl> DestinationPortsMap destination_ports_map_ ; <nl> + <nl> const Network : : Address : : InstanceConstSharedPtr address_ ; <nl> / / This is the reference to a factory context which all the generations of listener share . <nl> Configuration : : FactoryContext & parent_context_ ; <nl> mmm a / source / server / listener_impl . cc <nl> ppp b / source / server / listener_impl . cc <nl> void ListenerImpl : : buildFilterChains ( ) { <nl> parent_ . server_ . stats ( ) , parent_ . server_ . singletonManager ( ) , parent_ . server_ . threadLocal ( ) , <nl> validation_visitor_ , parent_ . server_ . api ( ) ) ; <nl> transport_factory_context . setInitManager ( * dynamic_init_manager_ ) ; <nl> - / / The init manager is a little messy . 
Will refactor when filter chain manager could accept <nl> - / / network filter chain update . <nl> - / / TODO ( lambdai ) : create builder from filter_chain_manager to obtain the init manager <nl> ListenerFilterChainFactoryBuilder builder ( * this , transport_factory_context ) ; <nl> - filter_chain_manager_ . addFilterChain ( config_ . filter_chains ( ) , builder , filter_chain_manager_ ) ; <nl> + filter_chain_manager_ . addFilterChains ( <nl> + config_ . filter_chains ( ) , <nl> + config_ . has_default_filter_chain ( ) ? & config_ . default_filter_chain ( ) : nullptr , builder , <nl> + filter_chain_manager_ ) ; <nl> } <nl> <nl> void ListenerImpl : : buildSocketOptions ( ) { <nl> void ListenerImpl : : diffFilterChain ( const ListenerImpl & another_listener , <nl> callback ( * message_and_filter_chain . second ) ; <nl> } <nl> } <nl> + / / Filter chain manager maintains an optional default filter chain besides the filter chains <nl> + / / indexed by message . <nl> + if ( auto eq = MessageUtil ( ) ; <nl> + filter_chain_manager_ . defaultFilterChainMessage ( ) . has_value ( ) & & <nl> + ( ! another_listener . filter_chain_manager_ . defaultFilterChainMessage ( ) . has_value ( ) | | <nl> + ! eq ( * another_listener . filter_chain_manager_ . defaultFilterChainMessage ( ) , <nl> + * filter_chain_manager_ . defaultFilterChainMessage ( ) ) ) ) { <nl> + callback ( * filter_chain_manager_ . defaultFilterChain ( ) ) ; <nl> + } <nl> } <nl> <nl> bool ListenerMessageUtil : : filterChainOnlyChange ( const envoy : : config : : listener : : v3 : : Listener & lhs , <nl> bool ListenerMessageUtil : : filterChainOnlyChange ( const envoy : : config : : listener : : v <nl> differencer . set_repeated_field_comparison ( Protobuf : : util : : MessageDifferencer : : AS_SET ) ; <nl> differencer . IgnoreField ( <nl> envoy : : config : : listener : : v3 : : Listener : : GetDescriptor ( ) - > FindFieldByName ( " filter_chains " ) ) ; <nl> + differencer . IgnoreField ( envoy : : config : : listener : : v3 : : Listener : : GetDescriptor ( ) - > FindFieldByName ( <nl> + " default_filter_chain " ) ) ; <nl> return differencer . Compare ( lhs , rhs ) ; <nl> } <nl> <nl> mmm a / test / integration / xds_integration_test . cc <nl> ppp b / test / integration / xds_integration_test . cc <nl> class LdsInplaceUpdateHttpIntegrationTest <nl> LdsInplaceUpdateHttpIntegrationTest ( ) <nl> : HttpIntegrationTest ( Http : : CodecClient : : Type : : HTTP1 , GetParam ( ) ) { } <nl> <nl> - void initialize ( ) override { <nl> + void inplaceInitialize ( bool add_default_filter_chain = false ) { <nl> autonomous_upstream_ = true ; <nl> setUpstreamCount ( 2 ) ; <nl> <nl> class LdsInplaceUpdateHttpIntegrationTest <nl> std : : string tls_inspector_config = ConfigHelper : : tlsInspectorFilter ( ) ; <nl> config_helper_ . addListenerFilter ( tls_inspector_config ) ; <nl> config_helper_ . addSslConfig ( ) ; <nl> - config_helper_ . addConfigModifier ( [ this ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) { <nl> + config_helper_ . addConfigModifier ( [ this , add_default_filter_chain ] ( <nl> + envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) { <nl> if ( ! use_default_balancer_ ) { <nl> bootstrap . mutable_static_resources ( ) <nl> - > mutable_listeners ( 0 ) <nl> class LdsInplaceUpdateHttpIntegrationTest <nl> bootstrap . mutable_static_resources ( ) - > mutable_clusters ( ) - > Add ( ) - > MergeFrom ( <nl> * bootstrap . mutable_static_resources ( ) - > mutable_clusters ( 0 ) ) ; <nl> bootstrap . 
mutable_static_resources ( ) - > mutable_clusters ( 1 ) - > set_name ( " cluster_1 " ) ; <nl> + <nl> + if ( add_default_filter_chain ) { <nl> + auto default_filter_chain = bootstrap . mutable_static_resources ( ) <nl> + - > mutable_listeners ( 0 ) <nl> + - > mutable_default_filter_chain ( ) ; <nl> + default_filter_chain - > MergeFrom ( * filter_chain_0 ) ; <nl> + } <nl> } ) ; <nl> <nl> BaseIntegrationTest : : initialize ( ) ; <nl> class LdsInplaceUpdateHttpIntegrationTest <nl> bool use_default_balancer_ { false } ; <nl> } ; <nl> <nl> - / / Verify that http response on filter chain 0 has " Connection : close " header when filter chain 0 <nl> - / / is deleted during the listener update . <nl> + / / Verify that http response on filter chain 1 and default filter chain have " Connection : close " <nl> + / / header when these 2 filter chains are deleted during the listener update . <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , ReloadConfigDeletingFilterChain ) { <nl> - initialize ( ) ; <nl> + inplaceInitialize ( / * add_default_filter_chain = * / true ) ; <nl> <nl> auto codec_client_1 = createHttpCodec ( " alpn1 " ) ; <nl> auto codec_client_0 = createHttpCodec ( " alpn0 " ) ; <nl> - Cleanup cleanup ( [ c1 = codec_client_1 . get ( ) , c0 = codec_client_0 . get ( ) ] ( ) { <nl> + auto codec_client_default = createHttpCodec ( " alpndefault " ) ; <nl> + <nl> + Cleanup cleanup ( [ c1 = codec_client_1 . get ( ) , c0 = codec_client_0 . get ( ) , <nl> + c_default = codec_client_default . get ( ) ] ( ) { <nl> c1 - > close ( ) ; <nl> c0 - > close ( ) ; <nl> + c_default - > close ( ) ; <nl> } ) ; <nl> ConfigHelper new_config_helper ( version_ , * api_ , <nl> MessageUtil : : getJsonStringFromMessage ( config_helper_ . bootstrap ( ) ) ) ; <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , ReloadConfigDeletingFilterChain ) { <nl> [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> listener - > mutable_filter_chains ( ) - > RemoveLast ( ) ; <nl> + listener - > clear_default_filter_chain ( ) ; <nl> } ) ; <nl> <nl> new_config_helper . setLds ( " 1 " ) ; <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , ReloadConfigDeletingFilterChain ) { <nl> test_server_ - > waitForGaugeGe ( " listener_manager . total_filter_chains_draining " , 1 ) ; <nl> <nl> expectResponseHeaderConnectionClose ( * codec_client_1 , true ) ; <nl> + expectResponseHeaderConnectionClose ( * codec_client_default , true ) ; <nl> + <nl> test_server_ - > waitForGaugeGe ( " listener_manager . total_filter_chains_draining " , 0 ) ; <nl> expectResponseHeaderConnectionClose ( * codec_client_0 , false ) ; <nl> expectConnenctionServed ( ) ; <nl> } <nl> <nl> / / Verify that http clients of filter chain 0 survives if new listener config adds new filter <nl> - / / chain 2 . <nl> + / / chain 2 and default filter chain . <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , ReloadConfigAddingFilterChain ) { <nl> - initialize ( ) ; <nl> + inplaceInitialize ( ) ; <nl> test_server_ - > waitForCounterGe ( " listener_manager . listener_create_success " , 1 ) ; <nl> <nl> auto codec_client_0 = createHttpCodec ( " alpn0 " ) ; <nl> Cleanup cleanup0 ( [ c0 = codec_client_0 . get ( ) ] ( ) { c0 - > close ( ) ; } ) ; <nl> ConfigHelper new_config_helper ( version_ , * api_ , <nl> MessageUtil : : getJsonStringFromMessage ( config_helper_ . bootstrap ( ) ) ) ; <nl> - new_config_helper . 
addConfigModifier ( <nl> - [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) - > void { <nl> - auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> - listener - > mutable_filter_chains ( ) - > Add ( ) - > MergeFrom ( * listener - > mutable_filter_chains ( 1 ) ) ; <nl> - * listener - > mutable_filter_chains ( 2 ) <nl> - - > mutable_filter_chain_match ( ) <nl> - - > mutable_application_protocols ( 0 ) = " alpn2 " ; <nl> - } ) ; <nl> + new_config_helper . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) <nl> + - > void { <nl> + auto * listener = bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) ; <nl> + listener - > mutable_filter_chains ( ) - > Add ( ) - > MergeFrom ( * listener - > mutable_filter_chains ( 1 ) ) ; <nl> + * listener - > mutable_filter_chains ( 2 ) <nl> + - > mutable_filter_chain_match ( ) <nl> + - > mutable_application_protocols ( 0 ) = " alpn2 " ; <nl> + auto default_filter_chain = <nl> + bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) - > mutable_default_filter_chain ( ) ; <nl> + default_filter_chain - > MergeFrom ( * listener - > mutable_filter_chains ( 1 ) ) ; <nl> + } ) ; <nl> new_config_helper . setLds ( " 1 " ) ; <nl> test_server_ - > waitForCounterGe ( " listener_manager . listener_in_place_updated " , 1 ) ; <nl> test_server_ - > waitForCounterGe ( " listener_manager . listener_create_success " , 2 ) ; <nl> <nl> auto codec_client_2 = createHttpCodec ( " alpn2 " ) ; <nl> - Cleanup cleanup2 ( [ c2 = codec_client_2 . get ( ) ] ( ) { c2 - > close ( ) ; } ) ; <nl> + auto codec_client_default = createHttpCodec ( " alpndefault " ) ; <nl> + <nl> + Cleanup cleanup2 ( [ c2 = codec_client_2 . get ( ) , c_default = codec_client_default . get ( ) ] ( ) { <nl> + c2 - > close ( ) ; <nl> + c_default - > close ( ) ; <nl> + } ) ; <nl> expectResponseHeaderConnectionClose ( * codec_client_2 , false ) ; <nl> + expectResponseHeaderConnectionClose ( * codec_client_default , false ) ; <nl> expectResponseHeaderConnectionClose ( * codec_client_0 , false ) ; <nl> expectConnenctionServed ( ) ; <nl> } <nl> <nl> + / / Verify that http clients of default filter chain is drained and recreated if the default filter <nl> + / / chain updates . <nl> + TEST_P ( LdsInplaceUpdateHttpIntegrationTest , ReloadConfigUpdatingDefaultFilterChain ) { <nl> + inplaceInitialize ( true ) ; <nl> + test_server_ - > waitForCounterGe ( " listener_manager . listener_create_success " , 1 ) ; <nl> + <nl> + auto codec_client_default = createHttpCodec ( " alpndefault " ) ; <nl> + Cleanup cleanup0 ( [ c_default = codec_client_default . get ( ) ] ( ) { c_default - > close ( ) ; } ) ; <nl> + ConfigHelper new_config_helper ( version_ , * api_ , <nl> + MessageUtil : : getJsonStringFromMessage ( config_helper_ . bootstrap ( ) ) ) ; <nl> + new_config_helper . addConfigModifier ( [ & ] ( envoy : : config : : bootstrap : : v3 : : Bootstrap & bootstrap ) <nl> + - > void { <nl> + auto default_filter_chain = <nl> + bootstrap . mutable_static_resources ( ) - > mutable_listeners ( 0 ) - > mutable_default_filter_chain ( ) ; <nl> + default_filter_chain - > set_name ( " default_filter_chain_v2 " ) ; <nl> + } ) ; <nl> + new_config_helper . setLds ( " 1 " ) ; <nl> + test_server_ - > waitForCounterGe ( " listener_manager . listener_in_place_updated " , 1 ) ; <nl> + test_server_ - > waitForCounterGe ( " listener_manager . 
listener_create_success " , 2 ) ; <nl> + <nl> + auto codec_client_default_v2 = createHttpCodec ( " alpndefaultv2 " ) ; <nl> + <nl> + Cleanup cleanup2 ( [ c_default_v2 = codec_client_default_v2 . get ( ) ] ( ) { c_default_v2 - > close ( ) ; } ) ; <nl> + expectResponseHeaderConnectionClose ( * codec_client_default , true ) ; <nl> + expectResponseHeaderConnectionClose ( * codec_client_default_v2 , false ) ; <nl> + expectConnenctionServed ( ) ; <nl> + } <nl> + <nl> / / Verify that balancer is inherited . Test only default balancer because ExactConnectionBalancer <nl> / / is verified in filter chain add and delete test case . <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , OverlappingFilterChainServesNewConnection ) { <nl> use_default_balancer_ = true ; <nl> - initialize ( ) ; <nl> + inplaceInitialize ( ) ; <nl> <nl> auto codec_client_0 = createHttpCodec ( " alpn0 " ) ; <nl> Cleanup cleanup ( [ c0 = codec_client_0 . get ( ) ] ( ) { c0 - > close ( ) ; } ) ; <nl> TEST_P ( LdsInplaceUpdateHttpIntegrationTest , OverlappingFilterChainServesNewConne <nl> expectConnenctionServed ( ) ; <nl> } <nl> <nl> + / / Verify default filter chain update is filter chain only update . <nl> + TEST_P ( LdsInplaceUpdateHttpIntegrationTest , DefaultFilterChainUpdate ) { } <nl> INSTANTIATE_TEST_SUITE_P ( IpVersions , LdsInplaceUpdateHttpIntegrationTest , <nl> testing : : ValuesIn ( TestEnvironment : : getIpVersionsForTest ( ) ) , <nl> TestUtility : : ipTestParamsToString ) ; <nl> mmm a / test / server / filter_chain_benchmark_test . cc <nl> ppp b / test / server / filter_chain_benchmark_test . cc <nl> const char YamlHeader [ ] = R " EOF ( <nl> socket_address : { address : 127 . 0 . 0 . 1 , port_value : 1234 } <nl> listener_filters : <nl> - name : " envoy . filters . listener . tls_inspector " <nl> - config : { } <nl> + typed_config : { } <nl> filter_chains : <nl> - filter_chain_match : <nl> # empty <nl> transport_socket : <nl> - name : tls <nl> + name : " envoy . transport_sockets . tls " <nl> typed_config : <nl> - " @ type " : type . googleapis . com / envoy . api . v2 . auth . DownstreamTlsContext <nl> + " @ type " : " type . googleapis . com / envoy . extensions . transport_sockets . tls . v3 . DownstreamTlsContext " <nl> common_tls_context : <nl> tls_certificates : <nl> - certificate_chain : { filename : " { { test_rundir } } / test / extensions / transport_sockets / tls / test_data / san_uri_cert . pem " } <nl> const char YamlSingleServer [ ] = R " EOF ( <nl> server_names : " server1 . example . com " <nl> transport_protocol : " tls " <nl> transport_socket : <nl> - name : tls <nl> + name : " envoy . transport_sockets . tls " <nl> typed_config : <nl> - " @ type " : type . googleapis . com / envoy . api . v2 . auth . DownstreamTlsContext <nl> + " @ type " : " type . googleapis . com / envoy . extensions . transport_sockets . tls . v3 . DownstreamTlsContext " <nl> common_tls_context : <nl> tls_certificates : <nl> - certificate_chain : { filename : " { { test_rundir } } / test / extensions / transport_sockets / tls / test_data / san_dns_cert . pem " } <nl> const char YamlSingleDstPortTop [ ] = R " EOF ( <nl> destination_port : ) EOF " ; <nl> const char YamlSingleDstPortBottom [ ] = R " EOF ( <nl> transport_socket : <nl> - name : tls <nl> + name : " envoy . transport_sockets . tls " <nl> typed_config : <nl> - " @ type " : type . googleapis . com / envoy . api . v2 . auth . DownstreamTlsContext <nl> + " @ type " : " type . googleapis . com / envoy . extensions . transport_sockets . tls . v3 . 
DownstreamTlsContext " <nl> common_tls_context : <nl> tls_certificates : <nl> - certificate_chain : { filename : " { { test_rundir } } / test / extensions / transport_sockets / tls / test_data / san_multiple_dns_cert . pem " } <nl> BENCHMARK_DEFINE_F ( FilterChainBenchmarkFixture , FilterChainManagerBuildTest ) <nl> FilterChainManagerImpl filter_chain_manager { <nl> std : : make_shared < Network : : Address : : Ipv4Instance > ( " 127 . 0 . 0 . 1 " , 1234 ) , factory_context , <nl> init_manager_ } ; <nl> - filter_chain_manager . addFilterChain ( filter_chains_ , dummy_builder_ , filter_chain_manager ) ; <nl> + filter_chain_manager . addFilterChains ( filter_chains_ , nullptr , dummy_builder_ , <nl> + filter_chain_manager ) ; <nl> } <nl> } <nl> <nl> BENCHMARK_DEFINE_F ( FilterChainBenchmarkFixture , FilterChainFindTest ) <nl> std : : make_shared < Network : : Address : : Ipv4Instance > ( " 127 . 0 . 0 . 1 " , 1234 ) , factory_context , <nl> init_manager_ } ; <nl> <nl> - filter_chain_manager . addFilterChain ( filter_chains_ , dummy_builder_ , filter_chain_manager ) ; <nl> + filter_chain_manager . addFilterChains ( filter_chains_ , nullptr , dummy_builder_ , <nl> + filter_chain_manager ) ; <nl> for ( auto _ : state ) { <nl> + UNREFERENCED_PARAMETER ( _ ) ; <nl> for ( int i = 0 ; i < state . range ( 0 ) ; i + + ) { <nl> filter_chain_manager . findFilterChain ( sockets [ i ] ) ; <nl> } <nl> mmm a / test / server / filter_chain_manager_impl_test . cc <nl> ppp b / test / server / filter_chain_manager_impl_test . cc <nl> class FilterChainManagerImplTest : public testing : : Test { <nl> return filter_chain_manager_ . findFilterChain ( * mock_socket ) ; <nl> } <nl> <nl> - void addSingleFilterChainHelper ( const envoy : : config : : listener : : v3 : : FilterChain & filter_chain ) { <nl> - filter_chain_manager_ . addFilterChain ( <nl> + void addSingleFilterChainHelper ( <nl> + const envoy : : config : : listener : : v3 : : FilterChain & filter_chain , <nl> + const envoy : : config : : listener : : v3 : : FilterChain * fallback_filter_chain = nullptr ) { <nl> + filter_chain_manager_ . addFilterChains ( <nl> std : : vector < const envoy : : config : : listener : : v3 : : FilterChain * > { & filter_chain } , <nl> - filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> + fallback_filter_chain , filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> } <nl> <nl> / / Intermediate states . <nl> class FilterChainManagerImplTest : public testing : : Test { <nl> ) EOF " ; <nl> Init : : ManagerImpl init_manager_ { " for_filter_chain_manager_test " } ; <nl> envoy : : config : : listener : : v3 : : FilterChain filter_chain_template_ ; <nl> + std : : shared_ptr < Network : : MockFilterChain > build_out_filter_chain_ { <nl> + std : : make_shared < Network : : MockFilterChain > ( ) } ; <nl> + envoy : : config : : listener : : v3 : : FilterChain fallback_filter_chain_ ; <nl> + std : : shared_ptr < Network : : MockFilterChain > build_out_fallback_filter_chain_ { <nl> + std : : make_shared < Network : : MockFilterChain > ( ) } ; <nl> + <nl> NiceMock < MockFilterChainFactoryBuilder > filter_chain_factory_builder_ ; <nl> NiceMock < Server : : Configuration : : MockFactoryContext > parent_context_ ; <nl> / / Test target . 
<nl> TEST_F ( FilterChainManagerImplTest , AddSingleFilterChain ) { <nl> EXPECT_NE ( filter_chain , nullptr ) ; <nl> } <nl> <nl> + TEST_F ( FilterChainManagerImplTest , FilterChainUseFallbackIfNoFilterChainMatches ) { <nl> + / / The build helper will build matchable filter chain and then build the default filter chain . <nl> + EXPECT_CALL ( filter_chain_factory_builder_ , buildFilterChain ( _ , _ ) ) <nl> + . WillOnce ( Return ( build_out_fallback_filter_chain_ ) ) ; <nl> + EXPECT_CALL ( filter_chain_factory_builder_ , buildFilterChain ( _ , _ ) ) <nl> + . WillOnce ( Return ( std : : make_shared < Network : : MockFilterChain > ( ) ) ) <nl> + . RetiresOnSaturation ( ) ; <nl> + addSingleFilterChainHelper ( filter_chain_template_ , & fallback_filter_chain_ ) ; <nl> + <nl> + auto filter_chain = findFilterChainHelper ( 10000 , " 127 . 0 . 0 . 1 " , " " , " tls " , { } , " 8 . 8 . 8 . 8 " , 111 ) ; <nl> + EXPECT_NE ( filter_chain , nullptr ) ; <nl> + auto fallback_filter_chain = <nl> + findFilterChainHelper ( 9999 , " 127 . 0 . 0 . 1 " , " " , " tls " , { } , " 8 . 8 . 8 . 8 " , 111 ) ; <nl> + EXPECT_EQ ( fallback_filter_chain , build_out_fallback_filter_chain_ . get ( ) ) ; <nl> + } <nl> + <nl> TEST_F ( FilterChainManagerImplTest , LookupFilterChainContextByFilterChainMessage ) { <nl> std : : vector < envoy : : config : : listener : : v3 : : FilterChain > filter_chain_messages ; <nl> <nl> for ( int i = 0 ; i < 2 ; i + + ) { <nl> envoy : : config : : listener : : v3 : : FilterChain new_filter_chain = filter_chain_template_ ; <nl> new_filter_chain . set_name ( absl : : StrCat ( " filter_chain_ " , i ) ) ; <nl> - / / For sanity check <nl> + / / For sanity check . <nl> new_filter_chain . mutable_filter_chain_match ( ) - > mutable_destination_port ( ) - > set_value ( 10000 + i ) ; <nl> filter_chain_messages . push_back ( std : : move ( new_filter_chain ) ) ; <nl> } <nl> EXPECT_CALL ( filter_chain_factory_builder_ , buildFilterChain ( _ , _ ) ) . Times ( 2 ) ; <nl> - filter_chain_manager_ . addFilterChain ( <nl> + filter_chain_manager_ . addFilterChains ( <nl> std : : vector < const envoy : : config : : listener : : v3 : : FilterChain * > { & filter_chain_messages [ 0 ] , <nl> & filter_chain_messages [ 1 ] } , <nl> - filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> + nullptr , filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> } <nl> <nl> TEST_F ( FilterChainManagerImplTest , DuplicateContextsAreNotBuilt ) { <nl> TEST_F ( FilterChainManagerImplTest , DuplicateContextsAreNotBuilt ) { <nl> } <nl> <nl> EXPECT_CALL ( filter_chain_factory_builder_ , buildFilterChain ( _ , _ ) ) . Times ( 1 ) ; <nl> - filter_chain_manager_ . addFilterChain ( <nl> + filter_chain_manager_ . addFilterChains ( <nl> std : : vector < const envoy : : config : : listener : : v3 : : FilterChain * > { & filter_chain_messages [ 0 ] } , <nl> - filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> + nullptr , filter_chain_factory_builder_ , filter_chain_manager_ ) ; <nl> <nl> FilterChainManagerImpl new_filter_chain_manager { <nl> std : : make_shared < Network : : Address : : Ipv4Instance > ( " 127 . 0 . 0 . 1 " , 1234 ) , parent_context_ , <nl> TEST_F ( FilterChainManagerImplTest , DuplicateContextsAreNotBuilt ) { <nl> / / The new filter chain manager maintains 3 filter chains , but only 2 filter chain context is <nl> / / built because it reuse the filter chain context in the previous filter chain manager <nl> EXPECT_CALL ( filter_chain_factory_builder_ , buildFilterChain ( _ , _ ) ) . 
Times ( 2 ) ; <nl> - new_filter_chain_manager . addFilterChain ( <nl> + new_filter_chain_manager . addFilterChains ( <nl> std : : vector < const envoy : : config : : listener : : v3 : : FilterChain * > { <nl> & filter_chain_messages [ 0 ] , & filter_chain_messages [ 1 ] , & filter_chain_messages [ 2 ] } , <nl> - filter_chain_factory_builder_ , new_filter_chain_manager ) ; <nl> + nullptr , filter_chain_factory_builder_ , new_filter_chain_manager ) ; <nl> } <nl> <nl> TEST_F ( FilterChainManagerImplTest , CreatedFilterChainFactoryContextHasIndependentDrainClose ) { <nl> mmm a / test / server / listener_manager_impl_test . cc <nl> ppp b / test / server / listener_manager_impl_test . cc <nl> TEST ( ListenerMessageUtilTest , ListenerMessageHaveDifferentNameNotEquivalent ) { <nl> EXPECT_FALSE ( Server : : ListenerMessageUtil : : filterChainOnlyChange ( listener1 , listener2 ) ) ; <nl> } <nl> <nl> + TEST ( ListenerMessageUtilTest , ListenerDefaultFilterChainChangeIsAlwaysFilterChainOnlyChange ) { <nl> + envoy : : config : : listener : : v3 : : Listener listener1 ; <nl> + listener1 . set_name ( " common " ) ; <nl> + envoy : : config : : listener : : v3 : : FilterChain default_filter_chain_1 ; <nl> + default_filter_chain_1 . set_name ( " 127 . 0 . 0 . 1 " ) ; <nl> + envoy : : config : : listener : : v3 : : Listener listener2 ; <nl> + listener2 . set_name ( " common " ) ; <nl> + envoy : : config : : listener : : v3 : : FilterChain default_filter_chain_2 ; <nl> + default_filter_chain_2 . set_name ( " 127 . 0 . 0 . 2 " ) ; <nl> + <nl> + { <nl> + listener1 . clear_default_filter_chain ( ) ; <nl> + listener2 . clear_default_filter_chain ( ) ; <nl> + EXPECT_TRUE ( Server : : ListenerMessageUtil : : filterChainOnlyChange ( listener1 , listener2 ) ) ; <nl> + } <nl> + { <nl> + * listener1 . mutable_default_filter_chain ( ) = default_filter_chain_1 ; <nl> + listener2 . clear_default_filter_chain ( ) ; <nl> + EXPECT_TRUE ( Server : : ListenerMessageUtil : : filterChainOnlyChange ( listener1 , listener2 ) ) ; <nl> + } <nl> + { <nl> + listener1 . clear_default_filter_chain ( ) ; <nl> + * listener2 . mutable_default_filter_chain ( ) = default_filter_chain_2 ; <nl> + EXPECT_TRUE ( Server : : ListenerMessageUtil : : filterChainOnlyChange ( listener1 , listener2 ) ) ; <nl> + } <nl> + { <nl> + * listener1 . mutable_default_filter_chain ( ) = default_filter_chain_1 ; <nl> + * listener2 . mutable_default_filter_chain ( ) = default_filter_chain_2 ; <nl> + EXPECT_TRUE ( Server : : ListenerMessageUtil : : filterChainOnlyChange ( listener1 , listener2 ) ) ; <nl> + } <nl> + } <nl> + <nl> TEST ( ListenerMessageUtilTest , ListenerMessageHaveDifferentFilterChainsAreEquivalent ) { <nl> envoy : : config : : listener : : v3 : : Listener listener1 ; <nl> listener1 . set_name ( " common " ) ; <nl>
listener : add match all filter chain ( )
envoyproxy/envoy
e62c994fac7ba18babfe2742b5595217ae2569c4
2020-10-16T15:28:58Z
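The envoyproxy/envoy record above adds an optional default ("match-all") filter chain that is consulted only after every configured match fails, and the new lookup deliberately prefers that fallback over the catch-all port-0 subtree once an exact-port subtree has been entered. Below is a minimal sketch of that ordering, using hypothetical stand-in types (FilterChain, ChainLookup) rather than Envoy's real classes; a miss inside an exact-port subtree is modeled as a null map entry.

#include <cstdint>
#include <map>
#include <memory>

// Hypothetical stand-ins for Network::FilterChain and the per-port match
// structures; names here are illustrative only.
struct FilterChain {};
using ChainPtr = std::shared_ptr<FilterChain>;

struct ChainLookup {
  std::map<uint16_t, ChainPtr> by_port; // exact destination-port entries
  ChainPtr catch_all;                   // the catch-all port-0 entry
  ChainPtr default_chain;               // fallback added by this patch

  const FilterChain* find(uint16_t port) const {
    auto it = by_port.find(port);
    if (it != by_port.end()) {
      // An exact-port subtree exists: on a miss inside it, fall back to
      // the default chain instead of retrying the catch-all port 0.
      return it->second ? it->second.get() : default_chain.get();
    }
    if (catch_all) {
      return catch_all.get();
    }
    return default_chain.get(); // nullptr when no default is configured
  }
};

Returning the fallback rather than retrying port 0 keeps each port subtree's match semantics self-contained, which matches the patch's comment that the fallback is used "instead of matching catch-all port 0".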
mmm a / hphp / runtime / base / file_repository . cpp <nl> ppp b / hphp / runtime / base / file_repository . cpp <nl> PhpFile : : PhpFile ( const string & fileName , const string & srcRoot , <nl> <nl> PhpFile : : ~ PhpFile ( ) { <nl> always_assert ( getRef ( ) = = 0 ) ; <nl> - if ( m_unit ! = nullptr ) { <nl> + if ( ! memory_profiling & & m_unit ! = nullptr ) { <nl> / / Deleting a Unit can grab a low - ranked lock and we ' re probably <nl> / / at a high rank right now <nl> PendQ : : defer ( new DeferredDeleter < Unit > ( m_unit ) ) ; <nl>
Leak Units when memory profiling to prevent cleanup of Funcs and Classes
facebook/hhvm
8ac589b4b77a65fe1a2521b4ab896bd69013e7f2
2013-07-25T19:10:56Z
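The facebook/hhvm record above gates a deferred delete behind a global profiling flag, deliberately leaking each Unit while the memory profiler is active so that profiler records pointing into its Funcs and Classes never dangle. The same guard pattern in isolation, with hypothetical names (g_memory_profiling, PhpFileLike) standing in for the real globals and classes:

// Hypothetical flag mirroring hhvm's `memory_profiling` global.
static bool g_memory_profiling = false;

struct Unit {
  // ... functions and classes whose addresses the profiler records ...
};

struct PhpFileLike {
  Unit* m_unit = nullptr;

  ~PhpFileLike() {
    // Normal path: reclaim the Unit. Profiling path: skip the delete on
    // purpose, leaking the Unit so raw pointers held by profiler records
    // stay valid for the lifetime of the process.
    if (!g_memory_profiling && m_unit != nullptr) {
      delete m_unit;
    }
  }
};

An intentional leak is a common trade-off for allocation profilers: correctness of the recorded back-references wins over reclaiming memory in a diagnostics-only build mode.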
mmm a / arangod / CMakeLists . txt <nl> ppp b / arangod / CMakeLists . txt <nl> SET ( ARANGOD_SOURCES <nl> Cache / PlainCache . cpp <nl> Cache / Rebalancer . cpp <nl> Cache / State . cpp <nl> + Cache / Table . cpp <nl> Cache / Transaction . cpp <nl> Cache / TransactionalBucket . cpp <nl> Cache / TransactionalCache . cpp <nl> mmm a / arangod / Cache / Cache . cpp <nl> ppp b / arangod / Cache / Cache . cpp <nl> <nl> # include " Basics / Common . h " <nl> # include " Basics / fasthash . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> + # include " Random / RandomGenerator . h " <nl> <nl> # include < stdint . h > <nl> # include < algorithm > <nl> <nl> # include < list > <nl> # include < thread > <nl> <nl> + # include < iostream > / / TODO <nl> + <nl> using namespace arangodb : : cache ; <nl> <nl> - uint64_t Cache : : _evictionStatsCapacity = 1024 ; <nl> + const uint64_t Cache : : minSize = 16384 ; <nl> + const uint64_t Cache : : minLogSize = 14 ; <nl> + <nl> uint64_t Cache : : _findStatsCapacity = 16384 ; <nl> <nl> Cache : : ConstructionGuard : : ConstructionGuard ( ) { } <nl> CachedValue * Cache : : Finding : : copy ( ) const { <nl> return ( ( _value = = nullptr ) ? nullptr : _value - > copy ( ) ) ; <nl> } <nl> <nl> - Cache : : Cache ( ConstructionGuard guard , Manager * manager , <nl> - Manager : : MetadataItr metadata , bool allowGrowth , <nl> - bool enableWindowedStats ) <nl> + Cache : : Cache ( ConstructionGuard guard , Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , bool enableWindowedStats , <nl> + std : : function < Table : : BucketClearer ( Metadata * ) > bucketClearer , <nl> + size_t slotsPerBucket ) <nl> : _state ( ) , <nl> - _allowGrowth ( allowGrowth ) , <nl> - _evictionStats ( _evictionStatsCapacity ) , <nl> - _insertionCount ( 0 ) , <nl> _enableWindowedStats ( enableWindowedStats ) , <nl> _findStats ( nullptr ) , <nl> _findHits ( 0 ) , <nl> _findMisses ( 0 ) , <nl> _manager ( manager ) , <nl> _metadata ( metadata ) , <nl> + _table ( table ) , <nl> + _bucketClearer ( bucketClearer ( & _metadata ) ) , <nl> + _slotsPerBucket ( slotsPerBucket ) , <nl> _openOperations ( 0 ) , <nl> _migrateRequestTime ( std : : chrono : : steady_clock : : now ( ) ) , <nl> - _resizeRequestTime ( std : : chrono : : steady_clock : : now ( ) ) , <nl> - _lastResizeRequestStatus ( true ) { <nl> + _resizeRequestTime ( std : : chrono : : steady_clock : : now ( ) ) { <nl> + _table - > setTypeSpecifics ( _bucketClearer , _slotsPerBucket ) ; <nl> + _table - > enable ( ) ; <nl> if ( _enableWindowedStats ) { <nl> try { <nl> _findStats . reset ( new StatBuffer ( _findStatsCapacity ) ) ; <nl> Cache : : Cache ( ConstructionGuard guard , Manager * manager , <nl> } <nl> } <nl> <nl> - uint64_t Cache : : limit ( ) { <nl> + uint64_t Cache : : size ( ) { <nl> + uint64_t size = 0 ; <nl> + _state . lock ( ) ; <nl> + if ( isOperational ( ) ) { <nl> + _metadata . lock ( ) ; <nl> + size = _metadata . allocatedSize ; <nl> + _metadata . unlock ( ) ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + return size ; <nl> + } <nl> + <nl> + uint64_t Cache : : usageLimit ( ) { <nl> uint64_t limit = 0 ; <nl> _state . lock ( ) ; <nl> if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - limit = _metadata - > softLimit ( ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . 
lock ( ) ; <nl> + limit = _metadata . softUsageLimit ; <nl> + _metadata . unlock ( ) ; <nl> } <nl> _state . unlock ( ) ; <nl> return limit ; <nl> uint64_t Cache : : usage ( ) { <nl> uint64_t usage = 0 ; <nl> _state . lock ( ) ; <nl> if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - usage = _metadata - > usage ( ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . lock ( ) ; <nl> + usage = _metadata . usage ; <nl> + _metadata . unlock ( ) ; <nl> } <nl> _state . unlock ( ) ; <nl> return usage ; <nl> std : : pair < double , double > Cache : : hitRates ( ) { <nl> return std : : pair < double , double > ( lifetimeRate , windowedRate ) ; <nl> } <nl> <nl> - void Cache : : disableGrowth ( ) { <nl> - _state . lock ( ) ; <nl> - _allowGrowth = false ; <nl> - _state . unlock ( ) ; <nl> - } <nl> - <nl> - void Cache : : enableGrowth ( ) { <nl> - _state . lock ( ) ; <nl> - _allowGrowth = false ; <nl> - _state . unlock ( ) ; <nl> - } <nl> - <nl> - bool Cache : : resize ( uint64_t requestedLimit ) { <nl> - _state . lock ( ) ; <nl> - bool allowed = isOperational ( ) ; <nl> - bool resized = false ; <nl> - startOperation ( ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - if ( allowed ) { <nl> - / / wait for previous resizes to finish <nl> - while ( true ) { <nl> - _metadata - > lock ( ) ; <nl> - if ( ! _metadata - > isSet ( State : : Flag : : resizing ) ) { <nl> - _metadata - > unlock ( ) ; <nl> - break ; <nl> - } <nl> - _metadata - > unlock ( ) ; <nl> - } <nl> - <nl> - resized = requestResize ( requestedLimit , false ) ; <nl> - } <nl> - <nl> - endOperation ( ) ; <nl> - return resized ; <nl> - } <nl> - <nl> bool Cache : : isResizing ( ) { <nl> bool resizing = false ; <nl> _state . lock ( ) ; <nl> if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - resizing = _metadata - > isSet ( State : : Flag : : resizing ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . lock ( ) ; <nl> + resizing = _metadata . isSet ( State : : Flag : : resizing ) ; <nl> + _metadata . unlock ( ) ; <nl> _state . unlock ( ) ; <nl> } <nl> <nl> bool Cache : : isMigrating ( ) const { <nl> return _state . isSet ( State : : Flag : : migrating ) ; <nl> } <nl> <nl> - bool Cache : : requestResize ( uint64_t requestedLimit , bool internal ) { <nl> - bool resized = false ; <nl> - int64_t lockTries = internal ? 10LL : - 1LL ; <nl> - bool ok = _state . lock ( lockTries ) ; <nl> + void Cache : : requestGrow ( ) { <nl> + bool ok = canResize ( ) ; <nl> if ( ok ) { <nl> - if ( ! internal | | ( _allowGrowth & & ( std : : chrono : : steady_clock : : now ( ) > <nl> - _resizeRequestTime ) ) ) { <nl> - _metadata - > lock ( ) ; <nl> - uint64_t newLimit = <nl> - ( requestedLimit > 0 ) <nl> - ? requestedLimit <nl> - : ( _lastResizeRequestStatus <nl> - ? ( _metadata - > hardLimit ( ) * 2 ) <nl> - : ( static_cast < uint64_t > ( <nl> - static_cast < double > ( _metadata - > hardLimit ( ) ) * <nl> - 1 . 25 ) ) ) ; <nl> - _metadata - > unlock ( ) ; <nl> - auto result = _manager - > requestResize ( _metadata , newLimit ) ; <nl> - _resizeRequestTime = result . second ; <nl> - resized = result . first ; <nl> + ok = _state . lock ( Cache : : triesSlow ) ; <nl> + if ( ok ) { <nl> + if ( std : : chrono : : steady_clock : : now ( ) > _resizeRequestTime ) { <nl> + _metadata . lock ( ) ; <nl> + ok = ! _metadata . isSet ( State : : Flag : : resizing ) ; <nl> + _metadata . 
unlock ( ) ; <nl> + if ( ok ) { <nl> + std : : tie ( ok , _resizeRequestTime ) = <nl> + _manager - > requestGrow ( shared_from_this ( ) ) ; <nl> + } <nl> + } <nl> + _state . unlock ( ) ; <nl> } <nl> - _state . unlock ( ) ; <nl> } <nl> - return resized ; <nl> } <nl> <nl> void Cache : : requestMigrate ( uint32_t requestedLogSize ) { <nl> - if ( ( + + _insertionCount & 0xFFF ) = = 0 ) { <nl> - auto stats = _evictionStats . getFrequencies ( ) ; <nl> - if ( ( ( stats - > size ( ) = = 1 ) & & <nl> - ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Stat : : insertEviction ) ) ) | | <nl> - ( ( stats - > size ( ) = = 2 ) & & <nl> - ( ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Stat : : insertNoEviction ) ) | | <nl> - ( ( * stats ) [ 0 ] . second * 16 > ( * stats ) [ 1 ] . second ) ) ) ) { <nl> - bool ok = _state . lock ( 10LL ) ; <nl> + bool ok = _state . lock ( Cache : : triesGuarantee ) ; <nl> + if ( ok ) { <nl> + if ( ! isMigrating ( ) & & <nl> + ( std : : chrono : : steady_clock : : now ( ) > _migrateRequestTime ) ) { <nl> + _metadata . lock ( ) ; <nl> + ok = ! _metadata . isSet ( State : : Flag : : migrating ) & & <nl> + ( requestedLogSize ! = _table - > logSize ( ) ) ; <nl> + _metadata . unlock ( ) ; <nl> if ( ok ) { <nl> - if ( ! isMigrating ( ) & & <nl> - ( std : : chrono : : steady_clock : : now ( ) > _migrateRequestTime ) ) { <nl> - _metadata - > lock ( ) ; <nl> - uint32_t newLogSize = ( requestedLogSize > 0 ) <nl> - ? requestedLogSize <nl> - : ( _metadata - > logSize ( ) + 1 ) ; <nl> - _metadata - > unlock ( ) ; <nl> - auto result = _manager - > requestMigrate ( _metadata , newLogSize ) ; <nl> - _resizeRequestTime = result . second ; <nl> - if ( result . first ) { <nl> - _evictionStats . clear ( ) ; <nl> - } <nl> - } <nl> - _state . unlock ( ) ; <nl> + std : : tie ( ok , _resizeRequestTime ) = <nl> + _manager - > requestMigrate ( shared_from_this ( ) , requestedLogSize ) ; <nl> } <nl> } <nl> + _state . unlock ( ) ; <nl> } <nl> } <nl> <nl> void Cache : : freeValue ( CachedValue * value ) { <nl> } <nl> <nl> bool Cache : : reclaimMemory ( uint64_t size ) { <nl> - _metadata - > lock ( ) ; <nl> - _metadata - > adjustUsageIfAllowed ( - static_cast < int64_t > ( size ) ) ; <nl> - bool underLimit = ( _metadata - > softLimit ( ) > = _metadata - > usage ( ) ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . lock ( ) ; <nl> + _metadata . adjustUsageIfAllowed ( - static_cast < int64_t > ( size ) ) ; <nl> + bool underLimit = ( _metadata . softUsageLimit > = _metadata . usage ) ; <nl> + _metadata . unlock ( ) ; <nl> <nl> return underLimit ; <nl> } <nl> uint32_t Cache : : hashKey ( void const * key , uint32_t keySize ) const { <nl> fasthash32 ( key , keySize , 0xdeadbeefUL ) ) ; <nl> } <nl> <nl> - void Cache : : recordStat ( Cache : : Stat stat ) { <nl> + void Cache : : recordStat ( Stat stat ) { <nl> switch ( stat ) { <nl> - case Stat : : insertEviction : <nl> - case Stat : : insertNoEviction : { <nl> - _evictionStats . insertRecord ( static_cast < uint8_t > ( stat ) ) ; <nl> - break ; <nl> - } <nl> case Stat : : findHit : { <nl> _findHits + + ; <nl> if ( _enableWindowedStats & & _findStats . get ( ) ! 
= nullptr ) { <nl> _findStats - > insertRecord ( static_cast < uint8_t > ( Stat : : findHit ) ) ; <nl> } <nl> - _manager - > recordHitStat ( Manager : : Stat : : findHit ) ; <nl> + _manager - > reportHitStat ( Stat : : findHit ) ; <nl> break ; <nl> } <nl> case Stat : : findMiss : { <nl> void Cache : : recordStat ( Cache : : Stat stat ) { <nl> if ( _enableWindowedStats & & _findStats . get ( ) ! = nullptr ) { <nl> _findStats - > insertRecord ( static_cast < uint8_t > ( Stat : : findMiss ) ) ; <nl> } <nl> - _manager - > recordHitStat ( Manager : : Stat : : findMiss ) ; <nl> + _manager - > reportHitStat ( Stat : : findMiss ) ; <nl> break ; <nl> } <nl> default : { break ; } <nl> } <nl> } <nl> <nl> - Manager : : MetadataItr & Cache : : metadata ( ) { return _metadata ; } <nl> + Metadata * Cache : : metadata ( ) { return & _metadata ; } <nl> + <nl> + std : : shared_ptr < Table > Cache : : table ( ) { return _table ; } <nl> <nl> void Cache : : beginShutdown ( ) { <nl> _state . lock ( ) ; <nl> void Cache : : beginShutdown ( ) { <nl> <nl> void Cache : : shutdown ( ) { <nl> _state . lock ( ) ; <nl> - _metadata - > lock ( ) ; <nl> - auto handle = _metadata - > cache ( ) ; / / hold onto self - reference to prevent <nl> + auto handle = shared_from_this ( ) ; / / hold onto self - reference to prevent <nl> / / pre - mature shared_ptr destruction <nl> TRI_ASSERT ( handle . get ( ) = = this ) ; <nl> - _metadata - > unlock ( ) ; <nl> if ( ! _state . isSet ( State : : Flag : : shutdown ) ) { <nl> if ( ! _state . isSet ( State : : Flag : : shuttingDown ) ) { <nl> _state . toggleFlag ( State : : Flag : : shuttingDown ) ; <nl> void Cache : : shutdown ( ) { <nl> <nl> _state . clear ( ) ; <nl> _state . toggleFlag ( State : : Flag : : shutdown ) ; <nl> - clearTables ( ) ; <nl> - _manager - > unregisterCache ( _metadata ) ; <nl> + std : : shared_ptr < Table > extra = <nl> + _table - > setAuxiliary ( std : : shared_ptr < Table > ( nullptr ) ) ; <nl> + if ( extra . get ( ) ! = nullptr ) { <nl> + extra - > clear ( ) ; <nl> + _manager - > reclaimTable ( extra ) ; <nl> + } <nl> + _table - > clear ( ) ; <nl> + _manager - > reclaimTable ( _table ) ; <nl> + _manager - > unregisterCache ( shared_from_this ( ) ) ; <nl> } <nl> + _metadata . lock ( ) ; <nl> + _metadata . changeTable ( 0 ) ; <nl> + _metadata . unlock ( ) ; <nl> _state . unlock ( ) ; <nl> } <nl> <nl> bool Cache : : canResize ( ) { <nl> bool allowed = true ; <nl> _state . lock ( ) ; <nl> if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - if ( _metadata - > isSet ( State : : Flag : : resizing ) ) { <nl> + _metadata . lock ( ) ; <nl> + if ( _metadata . isSet ( State : : Flag : : resizing ) | | <nl> + _metadata . isSet ( State : : Flag : : migrating ) ) { <nl> allowed = false ; <nl> } <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . unlock ( ) ; <nl> } else { <nl> allowed = false ; <nl> } <nl> bool Cache : : canResize ( ) { <nl> } <nl> <nl> bool Cache : : canMigrate ( ) { <nl> - bool allowed = true ; <nl> - _state . lock ( ) ; <nl> - if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - if ( _metadata - > isSet ( State : : Flag : : migrating ) ) { <nl> + bool allowed = ( _manager - > ioService ( ) ! = nullptr ) ; <nl> + if ( allowed ) { <nl> + _state . lock ( ) ; <nl> + if ( isOperational ( ) ) { <nl> + if ( _state . isSet ( State : : Flag : : migrating ) ) { <nl> + allowed = false ; <nl> + } else { <nl> + _metadata . lock ( ) ; <nl> + if ( _metadata . 
isSet ( State : : Flag : : migrating ) ) { <nl> + allowed = false ; <nl> + } <nl> + _metadata . unlock ( ) ; <nl> + } <nl> + } else { <nl> allowed = false ; <nl> } <nl> - _metadata - > unlock ( ) ; <nl> - } else { <nl> - allowed = false ; <nl> + _state . unlock ( ) ; <nl> } <nl> - _state . unlock ( ) ; <nl> <nl> return allowed ; <nl> } <nl> + <nl> + bool Cache : : freeMemory ( ) { <nl> + _state . lock ( ) ; <nl> + if ( ! isOperational ( ) ) { <nl> + _state . unlock ( ) ; <nl> + return false ; <nl> + } <nl> + startOperation ( ) ; <nl> + _state . unlock ( ) ; <nl> + <nl> + bool underLimit = reclaimMemory ( 0ULL ) ; <nl> + uint64_t failures = 0 ; <nl> + while ( ! underLimit ) { <nl> + / / pick a random bucket <nl> + uint32_t randomHash = RandomGenerator : : interval ( UINT32_MAX ) ; <nl> + uint64_t reclaimed = freeMemoryFrom ( randomHash ) ; <nl> + <nl> + if ( reclaimed > 0 ) { <nl> + failures = 0 ; <nl> + underLimit = reclaimMemory ( reclaimed ) ; <nl> + } else { <nl> + failures + + ; <nl> + if ( failures > 100 ) { <nl> + _state . lock ( ) ; <nl> + bool shouldQuit = ! isOperational ( ) ; <nl> + _state . unlock ( ) ; <nl> + <nl> + if ( shouldQuit ) { <nl> + break ; <nl> + } else { <nl> + failures = 0 ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + <nl> + endOperation ( ) ; <nl> + return true ; <nl> + } <nl> + <nl> + bool Cache : : migrate ( std : : shared_ptr < Table > newTable ) { <nl> + _state . lock ( ) ; <nl> + if ( ! isOperational ( ) ) { <nl> + _state . unlock ( ) ; <nl> + return false ; <nl> + } <nl> + startOperation ( ) ; <nl> + newTable - > setTypeSpecifics ( _bucketClearer , _slotsPerBucket ) ; <nl> + newTable - > enable ( ) ; <nl> + _table - > setAuxiliary ( newTable ) ; <nl> + TRI_ASSERT ( ! _state . isSet ( State : : Flag : : migrating ) ) ; <nl> + _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> + _state . unlock ( ) ; <nl> + <nl> + / / do the actual migration <nl> + for ( uint32_t i = 0 ; i < _table - > size ( ) ; i + + ) { <nl> + migrateBucket ( _table - > primaryBucket ( i ) , _table - > auxiliaryBuckets ( i ) , <nl> + newTable ) ; <nl> + } <nl> + <nl> + / / swap tables <nl> + _state . lock ( ) ; <nl> + std : : shared_ptr < Table > oldTable = _table ; <nl> + _table = newTable ; <nl> + _state . unlock ( ) ; <nl> + <nl> + / / clear out old table and release it <nl> + std : : shared_ptr < Table > confirm = <nl> + oldTable - > setAuxiliary ( std : : shared_ptr < Table > ( nullptr ) ) ; <nl> + TRI_ASSERT ( confirm . get ( ) = = newTable . get ( ) ) ; <nl> + oldTable - > clear ( ) ; <nl> + _manager - > reclaimTable ( oldTable ) ; <nl> + <nl> + / / unmarking migrating flags <nl> + _state . lock ( ) ; <nl> + _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> + _state . unlock ( ) ; <nl> + _metadata . lock ( ) ; <nl> + _metadata . changeTable ( _table - > memoryUsage ( ) ) ; <nl> + _metadata . toggleFlag ( State : : Flag : : migrating ) ; <nl> + _metadata . unlock ( ) ; <nl> + <nl> + endOperation ( ) ; <nl> + return true ; <nl> + } <nl> mmm a / arangod / Cache / Cache . h <nl> ppp b / arangod / Cache / Cache . h <nl> <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / ManagerTasks . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> <nl> # include < stdint . 
h > <nl> # include < list > <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> public : <nl> typedef FrequencyBuffer < uint8_t > StatBuffer ; <nl> <nl> + static const uint64_t minSize ; <nl> + static const uint64_t minLogSize ; <nl> + <nl> public : <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief A helper class for managing CachedValue lifecycles . <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> } ; <nl> <nl> public : <nl> - Cache ( ConstructionGuard guard , Manager * manager , <nl> - Manager : : MetadataItr metadata , bool allowGrowth , <nl> - bool enableWindowedStats ) ; <nl> + Cache ( ConstructionGuard guard , Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , bool enableWindowedStats , <nl> + std : : function < Table : : BucketClearer ( Metadata * ) > bucketClearer , <nl> + size_t slotsPerBucket ) ; <nl> virtual ~ Cache ( ) = default ; <nl> <nl> / / primary functionality ; documented in derived classes <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> virtual bool blacklist ( void const * key , uint32_t keySize ) = 0 ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Returns the limit on memory usage for this cache in bytes . <nl> + / / / @ brief Returns the total memory usage for this cache in bytes . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t limit ( ) ; <nl> + uint64_t size ( ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Returns the current memory usage for this cache in bytes . <nl> + / / / @ brief Returns the limit on data memory usage for this cache in bytes . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint64_t usageLimit ( ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the current data memory usage for this cache in bytes . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> uint64_t usage ( ) ; <nl> <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> std : : pair < double , double > hitRates ( ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Disallows the cache from requesting to be resized when it runs out <nl> - / / / of space . 
<nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void disableGrowth ( ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Allows the cache from requesting to be resized when it runs out of <nl> - / / / space . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void enableGrowth ( ) ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Request that this cache be given a new limit as specified . <nl> - / / / <nl> - / / / If there is enough free memory globally and the cache is not currently <nl> - / / / resizing , the request should be granted . If downsizing the cache , it may <nl> - / / / need to free some memory , which will be done in an asynchronous task . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - bool resize ( uint64_t requestedLimit = 0 ) ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Check whether the cache is currently in the process of resizing . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> bool isResizing ( ) ; <nl> <nl> protected : <nl> - State _state ; <nl> + static constexpr int64_t triesFast = 50 ; <nl> + static constexpr int64_t triesSlow = 10000 ; <nl> + static constexpr int64_t triesGuarantee = - 1 ; <nl> <nl> - / / whether to allow the cache to resize larger when it fills <nl> - bool _allowGrowth ; <nl> + protected : <nl> + State _state ; <nl> <nl> - / / structures to handle statistics <nl> - enum class Stat : uint8_t { <nl> - findHit = 1 , <nl> - findMiss = 2 , <nl> - insertEviction = 3 , <nl> - insertNoEviction = 4 <nl> - } ; <nl> - static uint64_t _evictionStatsCapacity ; <nl> - StatBuffer _evictionStats ; <nl> - std : : atomic < uint64_t > _insertionCount ; <nl> static uint64_t _findStatsCapacity ; <nl> bool _enableWindowedStats ; <nl> std : : unique_ptr < StatBuffer > _findStats ; <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> <nl> / / allow communication with manager <nl> Manager * _manager ; <nl> - Manager : : MetadataItr _metadata ; <nl> + Metadata _metadata ; <nl> + <nl> + / / manage the actual table <nl> + std : : shared_ptr < Table > _table ; <nl> + Table : : BucketClearer _bucketClearer ; <nl> + size_t _slotsPerBucket ; <nl> <nl> / / keep track of number of open operations to allow clean shutdown <nl> std : : atomic < uint32_t > _openOperations ; <nl> class Cache : public std : : enable_shared_from_this < Cache > { <nl> / / times to wait until requesting is allowed again <nl> Manager : : time_point _migrateRequestTime ; <nl> Manager : : time_point _resizeRequestTime ; <nl> - bool _lastResizeRequestStatus ; <nl> <nl> / / friend class manager and tasks <nl> friend class FreeMemoryTask ; <nl> class Cache : public std : : 
enable_shared_from_this < Cache > { <nl> void endOperation ( ) ; <nl> <nl> bool isMigrating ( ) const ; <nl> - bool requestResize ( uint64_t requestedLimit = 0 , bool internal = true ) ; <nl> + void requestGrow ( ) ; <nl> void requestMigrate ( uint32_t requestedLogSize = 0 ) ; <nl> <nl> - void freeValue ( CachedValue * value ) ; <nl> + static void freeValue ( CachedValue * value ) ; <nl> bool reclaimMemory ( uint64_t size ) ; <nl> - virtual void clearTables ( ) = 0 ; <nl> <nl> uint32_t hashKey ( void const * key , uint32_t keySize ) const ; <nl> - void recordStat ( Cache : : Stat stat ) ; <nl> + void recordStat ( Stat stat ) ; <nl> <nl> / / management <nl> - Manager : : MetadataItr & metadata ( ) ; <nl> + Metadata * metadata ( ) ; <nl> + std : : shared_ptr < Table > table ( ) ; <nl> void beginShutdown ( ) ; <nl> void shutdown ( ) ; <nl> bool canResize ( ) ; <nl> bool canMigrate ( ) ; <nl> - virtual bool freeMemory ( ) = 0 ; <nl> - virtual bool migrate ( ) = 0 ; <nl> + bool freeMemory ( ) ; <nl> + bool migrate ( std : : shared_ptr < Table > newTable ) ; <nl> + <nl> + virtual uint64_t freeMemoryFrom ( uint32_t hash ) = 0 ; <nl> + virtual void migrateBucket ( void * sourcePtr , <nl> + std : : unique_ptr < Table : : Subtable > targets , <nl> + std : : shared_ptr < Table > newTable ) = 0 ; <nl> } ; <nl> <nl> } ; / / end namespace cache <nl> mmm a / arangod / Cache / CacheManagerFeature . cpp <nl> ppp b / arangod / Cache / CacheManagerFeature . cpp <nl> using namespace arangodb : : options ; <nl> using namespace arangodb : : rest ; <nl> <nl> Manager * CacheManagerFeature : : MANAGER = nullptr ; <nl> - <nl> - static constexpr uint64_t MIN_REBALANCING_INTERVAL = 500 * 1000 ; <nl> + const uint64_t CacheManagerFeature : : minRebalancingInterval = 500 * 1000 ; <nl> <nl> CacheManagerFeature : : CacheManagerFeature ( <nl> application_features : : ApplicationServer * server ) <nl> void CacheManagerFeature : : collectOptions ( <nl> <nl> void CacheManagerFeature : : validateOptions ( <nl> std : : shared_ptr < options : : ProgramOptions > ) { <nl> - if ( _cacheSize < Manager : : MINIMUM_SIZE ) { <nl> + if ( _cacheSize < Manager : : minSize ) { <nl> LOG_TOPIC ( FATAL , arangodb : : Logger : : FIXME ) <nl> < < " invalid value for ` - - cache . size ' , need at least " <nl> - < < Manager : : MINIMUM_SIZE ; <nl> + < < Manager : : minSize ; <nl> FATAL_ERROR_EXIT ( ) ; <nl> } <nl> <nl> - if ( _cacheSize < ( MIN_REBALANCING_INTERVAL ) ) { <nl> + if ( _cacheSize < ( CacheManagerFeature : : minRebalancingInterval ) ) { <nl> LOG_TOPIC ( FATAL , arangodb : : Logger : : FIXME ) <nl> < < " invalid value for ` - - cache . rebalancing - interval ' , need at least " <nl> - < < ( MIN_REBALANCING_INTERVAL ) ; <nl> + < < ( CacheManagerFeature : : minRebalancingInterval ) ; <nl> FATAL_ERROR_EXIT ( ) ; <nl> } <nl> } <nl> mmm a / arangod / Cache / CacheManagerFeature . h <nl> ppp b / arangod / Cache / CacheManagerFeature . h <nl> class CacheManagerFeature final <nl> void unprepare ( ) override final ; <nl> <nl> private : <nl> + static const uint64_t minRebalancingInterval ; <nl> + <nl> std : : unique_ptr < cache : : Manager > _manager ; <nl> std : : unique_ptr < CacheRebalancerThread > _rebalancer ; <nl> uint64_t _cacheSize ; <nl> new file mode 100644 <nl> index 00000000000 . . 44a24100619 <nl> mmm / dev / null <nl> ppp b / arangod / Cache / Common . 
h <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2014 - 2017 ArangoDB GmbH , Cologne , Germany <nl> + / / / Copyright 2004 - 2014 triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Daniel H . Larkin <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # ifndef ARANGODB_CACHE_COMMON_H <nl> + # define ARANGODB_CACHE_COMMON_H <nl> + <nl> + # include " Basics / Common . h " <nl> + <nl> + # include < stdint . h > <nl> + <nl> + namespace arangodb { <nl> + namespace cache { <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Common size for all bucket types . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + constexpr size_t BUCKET_SIZE = 64 ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Enum to specify cache types . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + enum CacheType { Plain , Transactional } ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Enum to allow easy statistic recording across classes . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + enum class Stat : uint8_t { findHit = 1 , findMiss = 2 } ; <nl> + <nl> + } ; / / end namespace cache <nl> + } ; / / end namespace arangodb <nl> + <nl> + # endif <nl> mmm a / arangod / Cache / FrequencyBuffer . h <nl> ppp b / arangod / Cache / FrequencyBuffer . h <nl> class FrequencyBuffer { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Initialize with the given capacity . 
<nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - FrequencyBuffer ( uint64_t capacity ) : _current ( 0 ) , _cmp ( ) , _empty ( ) { <nl> + FrequencyBuffer ( uint64_t capacity ) <nl> + : _current ( 0 ) , <nl> + _capacity ( 0 ) , <nl> + _mask ( 0 ) , <nl> + _buffer ( nullptr ) , <nl> + _cmp ( ) , <nl> + _empty ( ) { <nl> uint64_t i = 0 ; <nl> for ( ; ( static_cast < uint64_t > ( 1 ) < < i ) < capacity ; i + + ) { <nl> } <nl> class FrequencyBuffer { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Insert an individual event record . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void insertRecord ( T record ) { <nl> - ( * _buffer ) [ _current + + & _mask ] = record ; <nl> - } <nl> + void insertRecord ( T record ) { ( * _buffer ) [ _current + + & _mask ] = record ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Remove all occurrences of the specified event record . <nl> mmm a / arangod / Cache / Manager . cpp <nl> ppp b / arangod / Cache / Manager . cpp <nl> <nl> # include " Basics / asio - helper . h " <nl> # include " Cache / Cache . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / ManagerTasks . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / PlainCache . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> # include " Cache / Transaction . h " <nl> # include " Cache / TransactionalCache . 
h " <nl> <nl> <nl> # include < algorithm > <nl> # include < atomic > <nl> # include < chrono > <nl> - # include < list > <nl> # include < memory > <nl> # include < set > <nl> # include < stack > <nl> # include < utility > <nl> <nl> - using namespace arangodb : : cache ; <nl> + # include < iostream > / / TODO <nl> <nl> - uint64_t Manager : : MINIMUM_SIZE = 1024 * 1024 ; <nl> + using namespace arangodb : : cache ; <nl> <nl> - static constexpr size_t TABLE_LOG_SIZE_ADJUSTMENT = 6 ; <nl> - static constexpr size_t MIN_TABLE_LOG_SIZE = 3 ; <nl> - static constexpr size_t MIN_LOG_SIZE = 10 ; <nl> - static constexpr uint64_t MIN_CACHE_SIZE = 1024 ; <nl> - / / use 16 for sizeof std : : list node - - should be valid for most libraries <nl> - static constexpr uint64_t CACHE_RECORD_OVERHEAD = sizeof ( Metadata ) + 16 ; <nl> - / / assume at most 16 slots in each stack - - TODO : check validity <nl> - static constexpr uint64_t TABLE_LISTS_OVERHEAD = 32 * 16 * 8 ; <nl> - static constexpr int64_t TRIES_FAST = 100 ; <nl> + const uint64_t Manager : : minSize = 1024 * 1024 ; <nl> + const uint64_t Manager : : minCacheAllocation = <nl> + Cache : : minSize + Table : : allocationSize ( Table : : minLogSize ) + <nl> + std : : max ( PlainCache : : allocationSize ( true ) , <nl> + TransactionalCache : : allocationSize ( true ) ) + <nl> + Manager : : cacheRecordOverhead ; <nl> + const std : : chrono : : milliseconds Manager : : rebalancingGracePeriod ( 10 ) ; <nl> <nl> bool Manager : : cmp_weak_ptr : : operator ( ) ( <nl> std : : weak_ptr < Cache > const & left , std : : weak_ptr < Cache > const & right ) const { <nl> size_t Manager : : hash_weak_ptr : : operator ( ) ( <nl> Manager : : Manager ( boost : : asio : : io_service * ioService , uint64_t globalLimit , <nl> bool enableWindowedStats ) <nl> : _state ( ) , <nl> - _accessStats ( ( globalLimit > = ( 1024ULL * 1024ULL * 1024ULL ) ) <nl> - ? ( ( 1024ULL * 1024ULL ) / sizeof ( std : : shared_ptr < Cache > ) ) <nl> - : ( globalLimit / 8192ULL ) ) , <nl> + _accessStats ( ( globalLimit > = ( 1024 * 1024 * 1024 ) ) <nl> + ? ( ( 1024 * 1024 ) / sizeof ( std : : weak_ptr < Cache > ) ) <nl> + : ( globalLimit / ( 1024 * sizeof ( std : : weak_ptr < Cache > ) ) ) ) , <nl> _accessCounter ( 0 ) , <nl> _enableWindowedStats ( enableWindowedStats ) , <nl> _findStats ( nullptr ) , <nl> Manager : : Manager ( boost : : asio : : io_service * ioService , uint64_t globalLimit , <nl> _caches ( ) , <nl> _globalSoftLimit ( globalLimit ) , <nl> _globalHardLimit ( globalLimit ) , <nl> - _globalAllocation ( sizeof ( Manager ) + TABLE_LISTS_OVERHEAD + <nl> - _accessStats . memoryUsage ( ) ) , <nl> + _globalHighwaterMark ( <nl> + static_cast < uint64_t > ( Manager : : highwaterMultiplier * <nl> + static_cast < double > ( _globalSoftLimit ) ) ) , <nl> + _fixedAllocation ( sizeof ( Manager ) + Manager : : tableListsOverhead + <nl> + _accessStats . memoryUsage ( ) ) , <nl> + _spareTableAllocation ( 0 ) , <nl> + _globalAllocation ( _fixedAllocation ) , <nl> _transactions ( ) , <nl> _ioService ( ioService ) , <nl> _resizeAttempt ( 0 ) , <nl> _outstandingTasks ( 0 ) , <nl> _rebalancingTasks ( 0 ) , <nl> - _resizingTasks ( 0 ) { <nl> + _resizingTasks ( 0 ) , <nl> + _rebalanceCompleted ( std : : chrono : : steady_clock : : now ( ) - <nl> + Manager : : rebalancingGracePeriod ) { <nl> TRI_ASSERT ( _globalAllocation < _globalSoftLimit ) ; <nl> TRI_ASSERT ( _globalAllocation < _globalHardLimit ) ; <nl> - try { <nl> - _findStats . 
reset ( new Manager : : FindStatBuffer ( 16384 ) ) ; <nl> - } catch ( std : : bad_alloc ) { <nl> - _findStats . reset ( nullptr ) ; <nl> - _enableWindowedStats = false ; <nl> + if ( enableWindowedStats ) { <nl> + try { <nl> + _findStats . reset ( new Manager : : FindStatBuffer ( 16384 ) ) ; <nl> + _fixedAllocation + = _findStats - > memoryUsage ( ) ; <nl> + _globalAllocation = _fixedAllocation ; <nl> + } catch ( std : : bad_alloc ) { <nl> + _findStats . reset ( nullptr ) ; <nl> + _enableWindowedStats = false ; <nl> + } <nl> } <nl> } <nl> <nl> Manager : : ~ Manager ( ) { shutdown ( ) ; } <nl> <nl> - std : : shared_ptr < Cache > Manager : : createCache ( Manager : : CacheType type , <nl> - uint64_t requestedLimit , <nl> - bool allowGrowth , <nl> - bool enableWindowedStats ) { <nl> + std : : shared_ptr < Cache > Manager : : createCache ( CacheType type , <nl> + bool enableWindowedStats , <nl> + uint64_t maxSize ) { <nl> std : : shared_ptr < Cache > result ( nullptr ) ; <nl> _state . lock ( ) ; <nl> bool allowed = isOperational ( ) ; <nl> - MetadataItr metadata = _caches . end ( ) ; <nl> - _state . unlock ( ) ; <nl> + Metadata metadata ; <nl> + std : : shared_ptr < Table > table ( nullptr ) ; <nl> <nl> if ( allowed ) { <nl> uint64_t fixedSize = 0 ; <nl> std : : shared_ptr < Cache > Manager : : createCache ( Manager : : CacheType type , <nl> default : <nl> break ; <nl> } <nl> - std : : tie ( allowed , metadata ) = registerCache ( requestedLimit , fixedSize ) ; <nl> + std : : tie ( allowed , metadata , table ) = registerCache ( fixedSize , maxSize ) ; <nl> } <nl> <nl> if ( allowed ) { <nl> switch ( type ) { <nl> case CacheType : : Plain : <nl> - result = PlainCache : : create ( this , metadata , allowGrowth , <nl> - enableWindowedStats ) ; <nl> + result = PlainCache : : create ( this , metadata , table , enableWindowedStats ) ; <nl> break ; <nl> case CacheType : : Transactional : <nl> - result = TransactionalCache : : create ( this , metadata , allowGrowth , <nl> + result = TransactionalCache : : create ( this , metadata , table , <nl> enableWindowedStats ) ; <nl> break ; <nl> default : <nl> break ; <nl> } <nl> - metadata - > link ( result ) ; <nl> } <nl> <nl> + if ( result . get ( ) ! = nullptr ) { <nl> + _caches . emplace ( result ) ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + <nl> return result ; <nl> } <nl> <nl> void Manager : : beginShutdown ( ) { <nl> _state . lock ( ) ; <nl> if ( isOperational ( ) ) { <nl> _state . toggleFlag ( State : : Flag : : shuttingDown ) ; <nl> - for ( MetadataItr metadata = _caches . begin ( ) ; metadata ! = _caches . end ( ) ; <nl> - metadata + + ) { <nl> - metadata - > lock ( ) ; <nl> - metadata - > cache ( ) - > beginShutdown ( ) ; <nl> - metadata - > unlock ( ) ; <nl> + for ( auto it = _caches . begin ( ) ; it ! = _caches . end ( ) ; it + + ) { <nl> + std : : shared_ptr < Cache > cache = * it ; <nl> + cache - > beginShutdown ( ) ; <nl> } <nl> } <nl> _state . unlock ( ) ; <nl> void Manager : : shutdown ( ) { <nl> _state . toggleFlag ( State : : Flag : : shuttingDown ) ; <nl> } <nl> while ( ! _caches . empty ( ) ) { <nl> - _caches . begin ( ) - > lock ( ) ; <nl> - std : : shared_ptr < Cache > cache = _caches . begin ( ) - > cache ( ) ; <nl> - _caches . begin ( ) - > unlock ( ) ; <nl> + std : : shared_ptr < Cache > cache = * _caches . begin ( ) ; <nl> _state . unlock ( ) ; <nl> cache - > shutdown ( ) ; <nl> _state . 
lock ( ) ; <nl> void Manager : : shutdown ( ) { <nl> <nl> / / change global cache limit <nl> bool Manager : : resize ( uint64_t newGlobalLimit ) { <nl> - if ( newGlobalLimit < MINIMUM_SIZE ) { <nl> + _state . lock ( ) ; <nl> + if ( ( newGlobalLimit < Manager : : minSize ) | | <nl> + ( static_cast < uint64_t > ( 0 . 5 * ( 1 . 0 - Manager : : highwaterMultiplier ) * <nl> + static_cast < double > ( newGlobalLimit ) ) < <nl> + _fixedAllocation ) | | <nl> + ( static_cast < uint64_t > ( Manager : : highwaterMultiplier * <nl> + static_cast < double > ( newGlobalLimit ) ) < <nl> + ( _caches . size ( ) * Manager : : minCacheAllocation ) ) ) { <nl> + _state . unlock ( ) ; <nl> return false ; <nl> } <nl> <nl> bool success = true ; <nl> - _state . lock ( ) ; <nl> <nl> if ( ! isOperational ( ) | | globalProcessRunning ( ) ) { <nl> / / shut ( ting ) down or still have another global process running already <nl> success = false ; <nl> } else { <nl> - / / otherwise we need to actually resize <nl> - _state . toggleFlag ( State : : Flag : : resizing ) ; <nl> - internalResize ( newGlobalLimit , true ) ; <nl> + bool done = adjustGlobalLimitsIfAllowed ( newGlobalLimit ) ; <nl> + if ( ! done ) { <nl> + / / otherwise we need to actually resize <nl> + _state . toggleFlag ( State : : Flag : : resizing ) ; <nl> + _globalSoftLimit = newGlobalLimit ; <nl> + _globalHighwaterMark = static_cast < uint64_t > ( <nl> + Manager : : highwaterMultiplier * static_cast < double > ( _globalSoftLimit ) ) ; <nl> + freeUnusedTables ( ) ; <nl> + done = adjustGlobalLimitsIfAllowed ( newGlobalLimit ) ; <nl> + if ( ! done ) { <nl> + rebalance ( true ) ; <nl> + shrinkOvergrownCaches ( TaskEnvironment : : resizing ) ; <nl> + } <nl> + } <nl> } <nl> <nl> _state . unlock ( ) ; <nl> std : : pair < double , double > Manager : : globalHitRates ( ) { <nl> if ( _enableWindowedStats & & _findStats . get ( ) ! = nullptr ) { <nl> auto stats = _findStats - > getFrequencies ( ) ; <nl> if ( stats - > size ( ) = = 1 ) { <nl> - if ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Manager : : Stat : : findHit ) ) { <nl> + if ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Stat : : findHit ) ) { <nl> windowedRate = 100 . 0 ; <nl> } else { <nl> windowedRate = 0 . 0 ; <nl> } <nl> } else if ( stats - > size ( ) = = 2 ) { <nl> - if ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Manager : : Stat : : findHit ) ) { <nl> + if ( ( * stats ) [ 0 ] . first = = static_cast < uint8_t > ( Stat : : findHit ) ) { <nl> currentHits = ( * stats ) [ 0 ] . second ; <nl> currentMisses = ( * stats ) [ 1 ] . second ; <nl> } else { <nl> std : : pair < double , double > Manager : : globalHitRates ( ) { <nl> } <nl> } <nl> <nl> - return std : : pair < double , double > ( lifetimeRate , windowedRate ) ; <nl> + return std : : make_pair ( lifetimeRate , windowedRate ) ; <nl> } <nl> <nl> Transaction * Manager : : beginTransaction ( bool readOnly ) { <nl> Transaction * Manager : : beginTransaction ( bool readOnly ) { <nl> <nl> void Manager : : endTransaction ( Transaction * tx ) { _transactions . 
end ( tx ) ; } <nl> <nl> - std : : pair < bool , Manager : : MetadataItr > Manager : : registerCache ( <nl> - uint64_t requestedLimit , uint64_t fixedSize ) { <nl> - bool ok = true ; <nl> - uint32_t logSize = 0 ; <nl> - uint32_t tableLogSize = MIN_TABLE_LOG_SIZE ; <nl> - for ( ; ( 1ULL < < logSize ) < requestedLimit ; logSize + + ) { <nl> - } <nl> - uint64_t grantedLimit = 1ULL < < logSize ; <nl> - if ( logSize > ( TABLE_LOG_SIZE_ADJUSTMENT + MIN_TABLE_LOG_SIZE ) ) { <nl> - tableLogSize = logSize - TABLE_LOG_SIZE_ADJUSTMENT ; <nl> - } <nl> + std : : tuple < bool , Metadata , std : : shared_ptr < Table > > Manager : : registerCache ( <nl> + uint64_t fixedSize , uint64_t maxSize ) { <nl> + TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + Metadata metadata ; <nl> <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) ) { <nl> - ok = false ; <nl> - } <nl> + std : : shared_ptr < Table > table = leaseTable ( Table : : minLogSize ) ; <nl> + bool ok = ( table . get ( ) ! = nullptr ) ; <nl> <nl> if ( ok ) { <nl> - while ( logSize > = MIN_LOG_SIZE ) { <nl> - uint64_t tableAllocation = <nl> - _tables [ tableLogSize ] . empty ( ) ? tableSize ( tableLogSize ) : 0 ; <nl> - if ( increaseAllowed ( grantedLimit + tableAllocation + <nl> - CACHE_RECORD_OVERHEAD + fixedSize ) ) { <nl> - break ; <nl> - } <nl> - <nl> - grantedLimit > > = 1U ; <nl> - logSize - - ; <nl> - if ( tableLogSize > MIN_TABLE_LOG_SIZE ) { <nl> - tableLogSize - - ; <nl> - } <nl> - } <nl> - <nl> - if ( logSize < MIN_LOG_SIZE ) { <nl> - ok = false ; <nl> + metadata = <nl> + Metadata ( Cache : : minSize , fixedSize , table - > memoryUsage ( ) , maxSize ) ; <nl> + ok = increaseAllowed ( metadata . allocatedSize - table - > memoryUsage ( ) , true ) ; <nl> + if ( ok ) { <nl> + _globalAllocation + = ( metadata . allocatedSize - table - > memoryUsage ( ) ) ; <nl> } <nl> } <nl> <nl> - MetadataItr metadata = _caches . end ( ) ; <nl> - if ( ok ) { <nl> - _globalAllocation + = ( grantedLimit + CACHE_RECORD_OVERHEAD + fixedSize ) ; <nl> - _caches . emplace_front ( grantedLimit ) ; <nl> - metadata = _caches . begin ( ) ; <nl> - metadata - > lock ( ) ; <nl> - leaseTable ( metadata , tableLogSize ) ; <nl> - metadata - > unlock ( ) ; <nl> + if ( ! ok ) { <nl> + reclaimTable ( table , true ) ; <nl> + table . reset ( ) ; <nl> } <nl> - _state . unlock ( ) ; <nl> <nl> - return std : : pair < bool , MetadataItr > ( ok , metadata ) ; <nl> + return std : : make_tuple ( ok , metadata , table ) ; <nl> } <nl> <nl> - void Manager : : unregisterCache ( Manager : : MetadataItr & metadata ) { <nl> + void Manager : : unregisterCache ( std : : shared_ptr < Cache > cache ) { <nl> _state . lock ( ) ; <nl> - <nl> - if ( _caches . empty ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return ; <nl> - } <nl> - <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> metadata - > lock ( ) ; <nl> - _globalAllocation - = ( metadata - > hardLimit ( ) + CACHE_RECORD_OVERHEAD ) ; <nl> - reclaimTables ( metadata ) ; <nl> - _accessStats . purgeRecord ( metadata - > cache ( ) ) ; <nl> + _globalAllocation - = metadata - > allocatedSize ; <nl> metadata - > unlock ( ) ; <nl> - <nl> - _caches . erase ( metadata ) ; <nl> - <nl> + _caches . erase ( cache ) ; <nl> _state . 
unlock ( ) ; <nl> } <nl> <nl> - std : : pair < bool , Manager : : time_point > Manager : : requestResize ( <nl> - Manager : : MetadataItr & metadata , uint64_t requestedLimit ) { <nl> + std : : pair < bool , Manager : : time_point > Manager : : requestGrow ( <nl> + std : : shared_ptr < Cache > cache ) { <nl> Manager : : time_point nextRequest = futureTime ( 100 ) ; <nl> bool allowed = false ; <nl> <nl> - bool ok = _state . lock ( TRIES_FAST ) ; <nl> + bool ok = _state . lock ( Manager : : triesSlow ) ; <nl> if ( ok ) { <nl> - if ( isOperational ( ) & & ! _state . isSet ( State : : Flag : : resizing ) ) { <nl> + if ( isOperational ( ) & & ! globalProcessRunning ( ) ) { <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> metadata - > lock ( ) ; <nl> <nl> - if ( ! metadata - > isSet ( State : : Flag : : resizing ) & & <nl> - ( ( requestedLimit < metadata - > hardLimit ( ) ) | | <nl> - increaseAllowed ( requestedLimit - metadata - > hardLimit ( ) ) ) ) { <nl> - allowed = true ; <nl> - if ( requestedLimit > metadata - > hardLimit ( ) ) { <nl> - / / if cache is growing , let it keep growing quickly <nl> + allowed = ! metadata - > isSet ( State : : Flag : : resizing ) & & <nl> + ! metadata - > isSet ( State : : Flag : : migrating ) ; <nl> + if ( allowed ) { <nl> + if ( metadata - > allocatedSize > = metadata - > deservedSize & & <nl> + pastRebalancingGracePeriod ( ) ) { <nl> + uint64_t increase = <nl> + std : : min ( metadata - > hardUsageLimit / 2 , <nl> + metadata - > maxSize - metadata - > allocatedSize ) ; <nl> + if ( increase > 0 & & increaseAllowed ( increase ) ) { <nl> + uint64_t newLimit = metadata - > allocatedSize + increase ; <nl> + metadata - > adjustDeserved ( newLimit ) ; <nl> + } else { <nl> + allowed = false ; <nl> + } <nl> + } <nl> + <nl> + if ( allowed ) { <nl> nextRequest = std : : chrono : : steady_clock : : now ( ) ; <nl> + resizeCache ( TaskEnvironment : : none , cache , <nl> + metadata - > newLimit ( ) ) ; / / unlocks metadata <nl> + } else { <nl> + metadata - > unlock ( ) ; <nl> } <nl> - resizeCache ( TaskEnvironment : : none , metadata , <nl> - requestedLimit ) ; / / unlocks metadata <nl> - } else { <nl> - metadata - > unlock ( ) ; <nl> } <nl> } <nl> _state . unlock ( ) ; <nl> } <nl> <nl> - return std : : pair < bool , Manager : : time_point > ( allowed , nextRequest ) ; <nl> + return std : : make_pair ( allowed , nextRequest ) ; <nl> } <nl> <nl> std : : pair < bool , Manager : : time_point > Manager : : requestMigrate ( <nl> - Manager : : MetadataItr & metadata , uint32_t requestedLogSize ) { <nl> + std : : shared_ptr < Cache > cache , uint32_t requestedLogSize ) { <nl> Manager : : time_point nextRequest = futureTime ( 100 ) ; <nl> bool allowed = false ; <nl> <nl> - bool ok = _state . lock ( TRIES_FAST ) ; <nl> + bool ok = _state . lock ( Manager : : triesSlow ) ; <nl> if ( ok ) { <nl> - if ( isOperational ( ) & & ! _state . isSet ( State : : Flag : : resizing ) ) { <nl> - if ( ! _tables [ requestedLogSize ] . empty ( ) | | <nl> - increaseAllowed ( tableSize ( requestedLogSize ) ) ) { <nl> - allowed = true ; <nl> + if ( isOperational ( ) & & ! globalProcessRunning ( ) ) { <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> + metadata - > lock ( ) ; <nl> + <nl> + allowed = ! 
metadata - > isSet ( State : : Flag : : migrating ) ; <nl> + if ( allowed ) { <nl> + if ( metadata - > tableSize < Table : : allocationSize ( requestedLogSize ) ) { <nl> + uint64_t increase = <nl> + Table : : allocationSize ( requestedLogSize ) - metadata - > tableSize ; <nl> + if ( ( metadata - > allocatedSize + increase > = metadata - > deservedSize ) & & <nl> + pastRebalancingGracePeriod ( ) ) { <nl> + if ( increaseAllowed ( increase ) ) { <nl> + uint64_t newLimit = metadata - > allocatedSize + increase ; <nl> + uint64_t granted = metadata - > adjustDeserved ( newLimit ) ; <nl> + if ( granted < newLimit ) { <nl> + allowed = false ; <nl> + } <nl> + } else { <nl> + allowed = false ; <nl> + } <nl> + } <nl> + } <nl> } <nl> + <nl> if ( allowed ) { <nl> - metadata - > lock ( ) ; <nl> - if ( metadata - > isSet ( State : : Flag : : migrating ) ) { <nl> - allowed = false ; <nl> - metadata - > unlock ( ) ; <nl> - } else { <nl> + / / first find out if cache is allowed to migrate <nl> + allowed = <nl> + metadata - > migrationAllowed ( Table : : allocationSize ( requestedLogSize ) ) ; <nl> + } <nl> + if ( allowed ) { <nl> + / / now find out if we can lease the table <nl> + std : : shared_ptr < Table > table = leaseTable ( requestedLogSize ) ; <nl> + allowed = ( table . get ( ) ! = nullptr ) ; <nl> + if ( allowed ) { <nl> nextRequest = std : : chrono : : steady_clock : : now ( ) ; <nl> - migrateCache ( TaskEnvironment : : none , metadata , <nl> - requestedLogSize ) ; / / unlocks metadata <nl> + migrateCache ( TaskEnvironment : : none , cache , <nl> + table ) ; / / unlocks metadata <nl> } <nl> } <nl> + <nl> + if ( ! allowed ) { <nl> + metadata - > unlock ( ) ; <nl> + } <nl> } <nl> _state . unlock ( ) ; <nl> } <nl> <nl> - return std : : pair < bool , Manager : : time_point > ( allowed , nextRequest ) ; <nl> + return std : : make_pair ( allowed , nextRequest ) ; <nl> } <nl> <nl> void Manager : : reportAccess ( std : : shared_ptr < Cache > cache ) { <nl> - / / if ( ( ( + + _accessCounter ) & static_cast < uint64_t > ( 7 ) ) = = 0 ) { / / record 1 in <nl> + / / if ( ( ( + + _accessCounter ) & static_cast < uint64_t > ( 7 ) ) = = 0 ) { / / record 1 <nl> + / / in <nl> / / 8 <nl> _accessStats . insertRecord ( cache ) ; <nl> / / } <nl> } <nl> <nl> - void Manager : : recordHitStat ( Manager : : Stat stat ) { <nl> + void Manager : : reportHitStat ( Stat stat ) { <nl> switch ( stat ) { <nl> case Stat : : findHit : { <nl> _findHits + + ; <nl> void Manager : : unprepareTask ( Manager : : TaskEnvironment environment ) { <nl> if ( ( - - _rebalancingTasks ) = = 0 ) { <nl> _state . lock ( ) ; <nl> _state . toggleFlag ( State : : Flag : : rebalancing ) ; <nl> + _rebalanceCompleted = std : : chrono : : steady_clock : : now ( ) ; <nl> _state . unlock ( ) ; <nl> } ; <nl> break ; <nl> void Manager : : unprepareTask ( Manager : : TaskEnvironment environment ) { <nl> case TaskEnvironment : : resizing : { <nl> if ( ( - - _resizingTasks ) = = 0 ) { <nl> _state . lock ( ) ; <nl> - internalResize ( _globalSoftLimit , false ) ; <nl> + _state . toggleFlag ( State : : Flag : : resizing ) ; <nl> _state . unlock ( ) ; <nl> } ; <nl> break ; <nl> void Manager : : unprepareTask ( Manager : : TaskEnvironment environment ) { <nl> _outstandingTasks - - ; <nl> } <nl> <nl> - bool Manager : : rebalance ( ) { <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) | | globalProcessRunning ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return false ; <nl> - } <nl> - <nl> - / / start rebalancing <nl> - _state . 
toggleFlag ( State : : Flag : : rebalancing ) ; <nl> - <nl> - / / determine strategy <nl> - <nl> - / / allow background tasks if more than 7 / 8ths full <nl> - bool allowTasks = <nl> - _globalAllocation > <nl> - static_cast < uint64_t > ( 0 . 875 * static_cast < double > ( _globalHardLimit ) ) ; <nl> - <nl> - / / be aggressive if more than 3 / 4ths full <nl> - bool beAggressive = <nl> - _globalAllocation > <nl> - static_cast < uint64_t > ( 0 . 75 * static_cast < double > ( _globalHardLimit ) ) ; <nl> - <nl> - / / aim for 3 / 8th with background tasks , 1 / 4th if no tasks but aggressive , no <nl> - / / goal otherwise <nl> - uint64_t goal = <nl> - beAggressive <nl> - ? ( allowTasks ? static_cast < uint64_t > ( <nl> - 0 . 375 * static_cast < double > ( _globalHardLimit ) ) <nl> - : static_cast < uint64_t > ( <nl> - 0 . 25 * static_cast < double > ( _globalHardLimit ) ) ) <nl> - : 0 ; <nl> - <nl> - if ( goal > 0 ) { <nl> - / / get stats on cache access to prioritize freeing from less frequently used <nl> - / / caches first , so more frequently used ones stay large <nl> - std : : shared_ptr < PriorityList > cacheList = priorityList ( ) ; <nl> - <nl> - / / just adjust limits <nl> - uint64_t reclaimed = <nl> - resizeAllCaches ( TaskEnvironment : : rebalancing , cacheList , allowTasks , <nl> - beAggressive , goal ) ; <nl> - _globalAllocation - = reclaimed ; <nl> - } <nl> + bool Manager : : rebalance ( bool onlyCalculate ) { <nl> + if ( ! onlyCalculate ) { <nl> + _state . lock ( ) ; <nl> + if ( ! isOperational ( ) | | globalProcessRunning ( ) ) { <nl> + _state . unlock ( ) ; <nl> + return false ; <nl> + } <nl> <nl> - if ( _rebalancingTasks . load ( ) = = 0 ) { <nl> + / / start rebalancing <nl> _state . toggleFlag ( State : : Flag : : rebalancing ) ; <nl> } <nl> <nl> - _state . unlock ( ) ; <nl> - return true ; <nl> - } <nl> - <nl> - void Manager : : internalResize ( uint64_t newGlobalLimit , bool firstAttempt ) { <nl> - TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> - bool done = false ; <nl> - std : : shared_ptr < PriorityList > cacheList ( nullptr ) ; <nl> - <nl> - if ( firstAttempt ) { <nl> - _resizeAttempt = 0 ; <nl> - } <nl> - <nl> - if ( ! isOperational ( ) ) { <nl> - / / abort resizing process so we can shutdown <nl> - done = true ; <nl> - } <nl> - <nl> - / / if limit is safe , just set it <nl> - if ( ! done ) { <nl> - done = adjustGlobalLimitsIfAllowed ( newGlobalLimit ) ; <nl> - } <nl> - <nl> - / / see if we can free enough from unused tables <nl> - if ( ! done ) { <nl> - freeUnusedTables ( ) ; <nl> - done = adjustGlobalLimitsIfAllowed ( newGlobalLimit ) ; <nl> + / / adjust deservedSize for each cache <nl> + std : : shared_ptr < PriorityList > cacheList = priorityList ( ) ; <nl> + std : : shared_ptr < Cache > cache ; <nl> + double weight ; <nl> + for ( auto pair : ( * cacheList ) ) { <nl> + std : : tie ( cache , weight ) = pair ; <nl> + uint64_t newDeserved = static_cast < uint64_t > ( <nl> + weight * static_cast < double > ( _globalHighwaterMark ) ) ; <nl> + TRI_ASSERT ( newDeserved > = Manager : : minCacheAllocation ) ; <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> + metadata - > lock ( ) ; <nl> + metadata - > adjustDeserved ( newDeserved ) ; <nl> + metadata - > unlock ( ) ; <nl> } <nl> <nl> - / / must resize individual caches <nl> - if ( ! 
done ) { <nl> - _globalSoftLimit = newGlobalLimit ; <nl> - <nl> - / / get stats on cache access to prioritize freeing from less frequently used <nl> - / / caches first , so more frequently used ones stay large <nl> - cacheList = priorityList ( ) ; <nl> + if ( ! onlyCalculate ) { <nl> + shrinkOvergrownCaches ( TaskEnvironment : : rebalancing ) ; <nl> <nl> - / / first just adjust limits down to usage <nl> - uint64_t reclaimed = <nl> - resizeAllCaches ( TaskEnvironment : : resizing , cacheList , true , true , <nl> - _globalAllocation - _globalSoftLimit ) ; <nl> - _globalAllocation - = reclaimed ; <nl> - done = adjustGlobalLimitsIfAllowed ( newGlobalLimit ) ; <nl> - } <nl> - <nl> - / / still haven ' t freed enough , now try cutting allocations more aggressively <nl> - / / by allowing use of background tasks to actually free memory from caches <nl> - if ( ! done ) { <nl> - if ( ( _resizeAttempt % 2 ) = = 0 ) { <nl> - resizeAllCaches ( TaskEnvironment : : resizing , cacheList , false , true , <nl> - _globalAllocation - _globalSoftLimit ) ; <nl> - } else { <nl> - migrateAllCaches ( TaskEnvironment : : resizing , cacheList , <nl> - _globalAllocation - _globalSoftLimit ) ; <nl> + if ( _rebalancingTasks . load ( ) = = 0 ) { <nl> + _rebalanceCompleted = std : : chrono : : steady_clock : : now ( ) ; <nl> + _state . toggleFlag ( State : : Flag : : rebalancing ) ; <nl> } <nl> - } <nl> <nl> - if ( ( _resizingTasks . load ( ) = = 0 ) ) { <nl> - _state . toggleFlag ( State : : Flag : : resizing ) ; <nl> + _state . unlock ( ) ; <nl> } <nl> + <nl> + return true ; <nl> } <nl> <nl> - uint64_t Manager : : resizeAllCaches ( Manager : : TaskEnvironment environment , <nl> - std : : shared_ptr < PriorityList > cacheList , <nl> - bool noTasks , bool aggressive , <nl> - uint64_t goal ) { <nl> + void Manager : : shrinkOvergrownCaches ( Manager : : TaskEnvironment environment ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> - uint64_t reclaimed = 0 ; <nl> - <nl> - for ( std : : shared_ptr < Cache > c : * cacheList ) { <nl> + for ( std : : shared_ptr < Cache > cache : _caches ) { <nl> / / skip this cache if it is already resizing or shutdown ! <nl> - if ( ! c - > canResize ( ) ) { <nl> + if ( ! cache - > canResize ( ) ) { <nl> continue ; <nl> } <nl> <nl> - MetadataItr metadata = c - > metadata ( ) ; <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> metadata - > lock ( ) ; <nl> <nl> - uint64_t newLimit ; <nl> - if ( aggressive ) { <nl> - newLimit = <nl> - ( noTasks ? metadata - > usage ( ) <nl> - : ( std : : min ) ( metadata - > usage ( ) , metadata - > hardLimit ( ) / 2 ) ) ; <nl> + if ( metadata - > allocatedSize > metadata - > deservedSize ) { <nl> + resizeCache ( environment , cache , metadata - > newLimit ( ) ) ; / / unlocks cache <nl> } else { <nl> - newLimit = ( std : : max ) ( metadata - > usage ( ) , <nl> - ( metadata - > hardLimit ( ) + metadata - > usage ( ) ) / 2 ) ; <nl> - } <nl> - newLimit = ( std : : max ) ( newLimit , MIN_CACHE_SIZE ) ; <nl> - <nl> - reclaimed + = metadata - > hardLimit ( ) - newLimit ; <nl> - resizeCache ( environment , metadata , newLimit ) ; / / unlocks cache <nl> - <nl> - if ( goal > 0 & & reclaimed > = goal ) { <nl> - break ; <nl> - } <nl> - } <nl> - <nl> - return reclaimed ; <nl> - } <nl> - <nl> - uint64_t Manager : : migrateAllCaches ( Manager : : TaskEnvironment environment , <nl> - std : : shared_ptr < PriorityList > cacheList , <nl> - uint64_t goal ) { <nl> - TRI_ASSERT ( _state . 
isLocked ( ) ) ; <nl> - uint64_t reclaimed = 0 ; <nl> - <nl> - for ( std : : shared_ptr < Cache > c : * cacheList ) { <nl> - / / skip this cache if it is already migrating or shutdown ! <nl> - if ( ! c - > canMigrate ( ) ) { <nl> - continue ; <nl> - } <nl> - <nl> - MetadataItr metadata = c - > metadata ( ) ; <nl> - metadata - > lock ( ) ; <nl> - <nl> - uint32_t logSize = metadata - > logSize ( ) ; <nl> - if ( ( logSize > MIN_TABLE_LOG_SIZE ) & & <nl> - increaseAllowed ( tableSize ( logSize - 1 ) ) ) { <nl> - reclaimed + = ( tableSize ( logSize ) - tableSize ( logSize - 1 ) ) ; <nl> - migrateCache ( environment , metadata , logSize - 1 ) ; / / unlocks metadata <nl> - } <nl> - if ( metadata - > isLocked ( ) ) { <nl> metadata - > unlock ( ) ; <nl> } <nl> - <nl> - if ( goal > 0 & & reclaimed > = goal ) { <nl> - break ; <nl> - } <nl> } <nl> - <nl> - return reclaimed ; <nl> } <nl> <nl> void Manager : : freeUnusedTables ( ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> for ( size_t i = 0 ; i < 32 ; i + + ) { <nl> while ( ! _tables [ i ] . empty ( ) ) { <nl> - uint8_t * table = _tables [ i ] . top ( ) ; <nl> - delete [ ] table ; <nl> + auto table = _tables [ i ] . top ( ) ; <nl> + _globalAllocation - = table - > memoryUsage ( ) ; <nl> _tables [ i ] . pop ( ) ; <nl> } <nl> } <nl> bool Manager : : adjustGlobalLimitsIfAllowed ( uint64_t newGlobalLimit ) { <nl> return false ; <nl> } <nl> <nl> + _globalHighwaterMark = static_cast < uint64_t > ( <nl> + Manager : : highwaterMultiplier * static_cast < double > ( newGlobalLimit ) ) ; <nl> _globalSoftLimit = newGlobalLimit ; <nl> _globalHardLimit = newGlobalLimit ; <nl> <nl> bool Manager : : adjustGlobalLimitsIfAllowed ( uint64_t newGlobalLimit ) { <nl> } <nl> <nl> void Manager : : resizeCache ( Manager : : TaskEnvironment environment , <nl> - Manager : : MetadataItr & metadata , uint64_t newLimit ) { <nl> + std : : shared_ptr < Cache > cache , uint64_t newLimit ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> TRI_ASSERT ( metadata - > isLocked ( ) ) ; <nl> <nl> - if ( metadata - > usage ( ) < = newLimit ) { <nl> + if ( metadata - > usage < = newLimit ) { <nl> + uint64_t oldLimit = metadata - > hardUsageLimit ; <nl> + / * std : : cout < < " ( " < < metadata - > softUsageLimit < < " , " <nl> + < < metadata - > hardUsageLimit < < " ) - > " < < newLimit < < " ( " <nl> + < < metadata - > deservedSize < < " , " < < metadata - > maxSize < < " ) " <nl> + < < std : : endl ; * / <nl> bool success = metadata - > adjustLimits ( newLimit , newLimit ) ; <nl> TRI_ASSERT ( success ) ; <nl> metadata - > unlock ( ) ; <nl> + if ( oldLimit > newLimit ) { <nl> + _globalAllocation - = ( oldLimit - newLimit ) ; <nl> + } else { <nl> + _globalAllocation + = ( newLimit - oldLimit ) ; <nl> + } <nl> return ; <nl> } <nl> <nl> - bool success = metadata - > adjustLimits ( newLimit , metadata - > hardLimit ( ) ) ; <nl> + / * std : : cout < < " ( " < < metadata - > softUsageLimit < < " , " <nl> + < < metadata - > hardUsageLimit < < " ) - > " < < newLimit < < " ( " <nl> + < < metadata - > deservedSize < < " , " < < metadata - > maxSize < < " ) " <nl> + < < std : : endl ; * / <nl> + bool success = metadata - > adjustLimits ( newLimit , metadata - > hardUsageLimit ) ; <nl> TRI_ASSERT ( success ) ; <nl> TRI_ASSERT ( ! 
metadata - > isSet ( State : : Flag : : resizing ) ) ; <nl> metadata - > toggleFlag ( State : : Flag : : resizing ) ; <nl> metadata - > unlock ( ) ; <nl> <nl> - auto task = std : : make_shared < FreeMemoryTask > ( environment , this , metadata ) ; <nl> + auto task = std : : make_shared < FreeMemoryTask > ( environment , this , cache ) ; <nl> bool dispatched = task - > dispatch ( ) ; <nl> if ( ! dispatched ) { <nl> / / TODO : decide what to do if we don ' t have an io_service <nl> + metadata - > lock ( ) ; <nl> + metadata - > toggleFlag ( State : : Flag : : resizing ) ; <nl> + metadata - > unlock ( ) ; <nl> } <nl> } <nl> <nl> void Manager : : migrateCache ( Manager : : TaskEnvironment environment , <nl> - Manager : : MetadataItr & metadata , uint32_t logSize ) { <nl> + std : : shared_ptr < Cache > cache , <nl> + std : : shared_ptr < Table > table ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + Metadata * metadata = cache - > metadata ( ) ; <nl> TRI_ASSERT ( metadata - > isLocked ( ) ) ; <nl> <nl> - bool unlocked ; <nl> - try { <nl> - leaseTable ( metadata , logSize ) ; <nl> - TRI_ASSERT ( ! metadata - > isSet ( State : : Flag : : migrating ) ) ; <nl> - metadata - > toggleFlag ( State : : Flag : : migrating ) ; <nl> - metadata - > unlock ( ) ; <nl> - unlocked = true ; <nl> + TRI_ASSERT ( ! metadata - > isSet ( State : : Flag : : migrating ) ) ; <nl> + metadata - > toggleFlag ( State : : Flag : : migrating ) ; <nl> + metadata - > unlock ( ) ; <nl> <nl> - auto task = std : : make_shared < MigrateTask > ( environment , this , metadata ) ; <nl> - bool dispatched = task - > dispatch ( ) ; <nl> - if ( ! dispatched ) { <nl> - / / TODO : decide what to do if we don ' t have an io_service <nl> - metadata - > lock ( ) ; <nl> - reclaimTables ( metadata , true ) ; <nl> - metadata - > unlock ( ) ; <nl> - } <nl> - } catch ( std : : bad_alloc ) { <nl> - if ( unlocked ) { <nl> - metadata - > lock ( ) ; <nl> - } <nl> - if ( metadata - > auxiliaryTable ( ) ! = nullptr ) { <nl> - uint8_t * junk = metadata - > releaseAuxiliaryTable ( ) ; <nl> - delete junk ; <nl> - } <nl> + auto task = std : : make_shared < MigrateTask > ( environment , this , cache , table ) ; <nl> + bool dispatched = task - > dispatch ( ) ; <nl> + if ( ! dispatched ) { <nl> + / / TODO : decide what to do if we don ' t have an io_service <nl> + metadata - > lock ( ) ; <nl> + reclaimTable ( table , true ) ; <nl> + metadata - > toggleFlag ( State : : Flag : : migrating ) ; <nl> metadata - > unlock ( ) ; <nl> } <nl> } <nl> <nl> - void Manager : : leaseTable ( Manager : : MetadataItr & metadata , uint32_t logSize ) { <nl> + std : : shared_ptr < Table > Manager : : leaseTable ( uint32_t logSize ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> - TRI_ASSERT ( metadata - > isLocked ( ) ) ; <nl> <nl> - uint8_t * table = nullptr ; <nl> + std : : shared_ptr < Table > table ( nullptr ) ; <nl> if ( _tables [ logSize ] . empty ( ) ) { <nl> - table = reinterpret_cast < uint8_t * > ( new PlainBucket [ 1 < < logSize ] ) ; <nl> - memset ( table , 0 , tableSize ( logSize ) ) ; <nl> - _globalAllocation + = tableSize ( logSize ) ; <nl> + if ( increaseAllowed ( Table : : allocationSize ( logSize ) , true ) ) { <nl> + try { <nl> + table = std : : make_shared < Table > ( logSize ) ; <nl> + _globalAllocation + = table - > memoryUsage ( ) ; <nl> + } catch ( std : : bad_alloc ) { <nl> + table . reset ( ) ; <nl> + } <nl> + } <nl> } else { <nl> table = _tables [ logSize ] . 
top ( ) ; <nl> + _spareTableAllocation - = table - > memoryUsage ( ) ; <nl> _tables [ logSize ] . pop ( ) ; <nl> } <nl> <nl> - / / if main null , main , otherwise auxiliary <nl> - metadata - > grantAuxiliaryTable ( table , logSize ) ; <nl> - if ( metadata - > table ( ) = = nullptr ) { <nl> - metadata - > swapTables ( ) ; <nl> - } <nl> + return table ; <nl> } <nl> <nl> - void Manager : : reclaimTables ( Manager : : MetadataItr & metadata , <nl> - bool auxiliaryOnly ) { <nl> - TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> - TRI_ASSERT ( metadata - > isLocked ( ) ) ; <nl> - <nl> - uint8_t * table ; <nl> - uint32_t logSize ; <nl> - <nl> - logSize = metadata - > auxiliaryLogSize ( ) ; <nl> - table = metadata - > releaseAuxiliaryTable ( ) ; <nl> - if ( table ! = nullptr ) { <nl> - _tables [ logSize ] . push ( table ) ; <nl> + void Manager : : reclaimTable ( std : : shared_ptr < Table > table , bool internal ) { <nl> + TRI_ASSERT ( table . get ( ) ! = nullptr ) ; <nl> + if ( ! internal ) { <nl> + _state . lock ( ) ; <nl> } <nl> <nl> - if ( auxiliaryOnly ) { <nl> - return ; <nl> + uint32_t logSize = table - > logSize ( ) ; <nl> + size_t maxTables = ( logSize < 18 ) ? ( 1 < < ( 18 - logSize ) ) : 1 ; <nl> + if ( ( _tables [ logSize ] . size ( ) < maxTables ) & & <nl> + ( ( table - > memoryUsage ( ) + _spareTableAllocation ) < <nl> + ( ( _globalSoftLimit - _globalHighwaterMark ) / 2 ) ) ) { <nl> + _tables [ logSize ] . emplace ( table ) ; <nl> + _spareTableAllocation + = table - > memoryUsage ( ) ; <nl> + } else { <nl> + _globalAllocation - = table - > memoryUsage ( ) ; <nl> + table . reset ( ) ; <nl> } <nl> <nl> - logSize = metadata - > logSize ( ) ; <nl> - table = metadata - > releaseTable ( ) ; <nl> - if ( table ! = nullptr ) { <nl> - _tables [ logSize ] . push ( table ) ; <nl> + if ( ! internal ) { <nl> + _state . unlock ( ) ; <nl> } <nl> } <nl> <nl> - bool Manager : : increaseAllowed ( uint64_t increase ) const { <nl> + bool Manager : : increaseAllowed ( uint64_t increase , bool privileged ) const { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> - if ( _state . isSet ( State : : Flag : : resizing ) & & <nl> - ( _globalAllocation < = _globalSoftLimit ) ) { <nl> - return ( increase < = ( _globalSoftLimit - _globalAllocation ) ) ; <nl> - } <nl> + if ( privileged ) { <nl> + if ( _state . isSet ( State : : Flag : : resizing ) & & <nl> + ( _globalAllocation < = _globalSoftLimit ) ) { <nl> + return ( increase < = ( _globalSoftLimit - _globalAllocation ) ) ; <nl> + } <nl> <nl> - return ( increase < = ( _globalHardLimit - _globalAllocation ) ) ; <nl> - } <nl> + return ( increase < = ( _globalHardLimit - _globalAllocation ) ) ; <nl> + } <nl> <nl> - uint64_t Manager : : tableSize ( uint32_t logSize ) const { <nl> - return ( sizeof ( PlainBucket ) * ( 1ULL < < logSize ) ) ; <nl> + return ( increase < = ( _globalHighwaterMark - _globalAllocation ) ) ; <nl> } <nl> <nl> std : : shared_ptr < Manager : : PriorityList > Manager : : priorityList ( ) { <nl> TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> std : : shared_ptr < PriorityList > list ( new PriorityList ( ) ) ; <nl> list - > reserve ( _caches . size ( ) ) ; <nl> + double minimumWeight = static_cast < double > ( Manager : : minCacheAllocation ) / <nl> + static_cast < double > ( _globalHighwaterMark ) ; <nl> + double uniformMarginalWeight = 0 . 5 / static_cast < double > ( _caches . size ( ) ) ; <nl> + double baseWeight = std : : max ( minimumWeight , uniformMarginalWeight ) ; <nl> + double remainingWeight = <nl> + 1 . 
0 - ( baseWeight * static_cast < double > ( _caches . size ( ) ) ) ; <nl> <nl> / / catalog accessed caches <nl> auto stats = _accessStats . getFrequencies ( ) ; <nl> std : : shared_ptr < Manager : : PriorityList > Manager : : priorityList ( ) { <nl> } <nl> <nl> / / gather all unaccessed caches at beginning of list <nl> - for ( MetadataItr m = _caches . begin ( ) ; m ! = _caches . end ( ) ; m + + ) { <nl> - m - > lock ( ) ; <nl> - std : : shared_ptr < Cache > cache = m - > cache ( ) ; <nl> - m - > unlock ( ) ; <nl> - <nl> - auto found = accessed . find ( cache ) ; <nl> + for ( auto it = _caches . begin ( ) ; it ! = _caches . end ( ) ; it + + ) { <nl> + auto found = accessed . find ( * it ) ; <nl> if ( found = = accessed . end ( ) ) { <nl> - list - > emplace_back ( cache ) ; <nl> + list - > emplace_back ( * it , baseWeight ) ; <nl> } <nl> } <nl> <nl> + / / count total accesses to get basis for comparison <nl> + uint64_t totalAccesses = 0 ; <nl> + for ( auto s : * stats ) { <nl> + totalAccesses + = s . second ; <nl> + } <nl> + double normalizer = remainingWeight / static_cast < double > ( totalAccesses ) ; <nl> + <nl> / / gather all accessed caches in order <nl> for ( auto s : * stats ) { <nl> if ( auto cache = s . first . lock ( ) ) { <nl> - list - > emplace_back ( cache ) ; <nl> + double accessWeight = static_cast < double > ( s . second ) * normalizer ; <nl> + list - > emplace_back ( cache , baseWeight + accessWeight ) ; <nl> } <nl> } <nl> <nl> Manager : : time_point Manager : : futureTime ( uint64_t millisecondsFromNow ) { <nl> return ( std : : chrono : : steady_clock : : now ( ) + <nl> std : : chrono : : milliseconds ( millisecondsFromNow ) ) ; <nl> } <nl> + <nl> + bool Manager : : pastRebalancingGracePeriod ( ) const { <nl> + TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + bool ok = ! _state . isSet ( State : : Flag : : rebalancing ) ; <nl> + if ( ok ) { <nl> + ok = ( std : : chrono : : steady_clock : : now ( ) - _rebalanceCompleted ) > = <nl> + Manager : : rebalancingGracePeriod ; <nl> + } <nl> + <nl> + return ok ; <nl> + } <nl> mmm a / arangod / Cache / Manager . h <nl> ppp b / arangod / Cache / Manager . h <nl> <nl> # include " Basics / Common . h " <nl> # include " Basics / asio - helper . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> # include " Cache / Transaction . h " <nl> # include " Cache / TransactionManager . h " <nl> <nl> # include < stdint . 
h > <nl> # include < atomic > <nl> # include < chrono > <nl> - # include < list > <nl> # include < memory > <nl> + # include < set > <nl> # include < stack > <nl> # include < utility > <nl> class Manager { <nl> } ; <nl> <nl> public : <nl> - static uint64_t MINIMUM_SIZE ; <nl> + static const uint64_t minSize ; <nl> typedef FrequencyBuffer < std : : weak_ptr < Cache > , cmp_weak_ptr , hash_weak_ptr > <nl> AccessStatBuffer ; <nl> typedef FrequencyBuffer < uint8_t > FindStatBuffer ; <nl> - typedef std : : vector < std : : shared_ptr < Cache > > PriorityList ; <nl> + typedef std : : vector < std : : pair < std : : shared_ptr < Cache > , double > > PriorityList ; <nl> typedef std : : chrono : : time_point < std : : chrono : : steady_clock > time_point ; <nl> - typedef std : : list < Metadata > : : iterator MetadataItr ; <nl> <nl> public : <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> bool enableWindowedStats = true ) ; <nl> ~ Manager ( ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Enum to specify which type of cache to create . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - enum CacheType { Plain , Transactional } ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Creates an individual cache . <nl> / / / <nl> / / / The type must be specified . It is possible that the cache cannot be <nl> / / / created ( e . g . in situations of extreme memory pressure ) , in which case the <nl> - / / / returned pointer will be nullptr . If there isn ' t enough memory to create a <nl> - / / / cache with the requested limit , the actual limit may be smaller . If the <nl> - / / / third parameter is true , the cache will be allowed to grow if it becomes <nl> - / / / full and memory is available globally ; otherwise the limit given to it by <nl> - / / / the manager is a hard upper limit which may only be adjusted downward . <nl> - / / / This parameter is true by default . It should likely only be set to be <nl> - / / / false for low - priority , short - lived caches . <nl> + / / / returned pointer will be nullptr . If the second parameter is true , then <nl> + / / / windowed stats will be collected . This incurs some memory overhead , <nl> + / / / but only a slight performance hit . The windowed stats refer to only a <nl> + / / / recent window in time , rather than over the full lifetime of the cache . <nl> + / / / The third parameter controls the maximum size of the cache over its <nl> + / / / lifetime . It should likely only be set to a non - default value for <nl> + / / / infrequently accessed or short - lived caches . 
<nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - std : : shared_ptr < Cache > createCache ( Manager : : CacheType type , <nl> - uint64_t requestedLimit , <nl> - bool allowGrowth = true , <nl> - bool enableWindowedStats = false ) ; <nl> + std : : shared_ptr < Cache > createCache ( CacheType type , <nl> + bool enableWindowedStats = false , <nl> + uint64_t maxSize = UINT64_MAX ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Destroy the given cache . <nl> class Manager { <nl> void endTransaction ( Transaction * tx ) ; <nl> <nl> private : <nl> + / / use sizeof ( std : : shared_ptr < Cache > ) + 32 for sizeof <nl> + / / std : : set < std : : shared_ptr < Cache > > node - - should be valid for most libraries <nl> + static constexpr uint64_t cacheRecordOverhead = <nl> + sizeof ( std : : shared_ptr < Cache > ) + 32 ; <nl> + / / assume at most 16 slots in each stack - - TODO : check validity <nl> + static constexpr uint64_t tableListsOverhead = <nl> + 32 * 16 * sizeof ( std : : shared_ptr < Cache > ) ; <nl> + static constexpr int64_t triesFast = 100 ; <nl> + static constexpr int64_t triesSlow = 1000 ; <nl> + <nl> / / simple state variable for locking and other purposes <nl> State _state ; <nl> <nl> class Manager { <nl> std : : atomic < uint64_t > _accessCounter ; <nl> <nl> / / structures to handle hit rate monitoring <nl> - enum class Stat : uint8_t { findHit = 1 , findMiss = 2 } ; <nl> bool _enableWindowedStats ; <nl> std : : unique_ptr < Manager : : FindStatBuffer > _findStats ; <nl> std : : atomic < uint64_t > _findHits ; <nl> std : : atomic < uint64_t > _findMisses ; <nl> <nl> - / / list of metadata objects to keep track of all the registered caches <nl> - std : : list < Metadata > _caches ; <nl> + / / set of pointers to keep track of registered caches <nl> + std : : set < std : : shared_ptr < Cache > > _caches ; <nl> <nl> / / actual tables to lease out <nl> - std : : stack < uint8_t * > _tables [ 32 ] ; <nl> + std : : stack < std : : shared_ptr < Table > > _tables [ 32 ] ; <nl> <nl> / / global statistics <nl> uint64_t _globalSoftLimit ; <nl> uint64_t _globalHardLimit ; <nl> + uint64_t _globalHighwaterMark ; <nl> + uint64_t _fixedAllocation ; <nl> + uint64_t _spareTableAllocation ; <nl> uint64_t _globalAllocation ; <nl> <nl> / / transaction management <nl> class Manager { <nl> std : : atomic < uint64_t > _outstandingTasks ; <nl> std : : atomic < uint64_t > _rebalancingTasks ; <nl> std : : atomic < uint64_t > _resizingTasks ; <nl> + Manager : : time_point _rebalanceCompleted ; <nl> <nl> / / friend class tasks and caches to allow access <nl> friend class Cache ; <nl> friend class FreeMemoryTask ; <nl> + friend class Metadata ; <nl> friend class MigrateTask ; <nl> friend class PlainCache ; <nl> friend class Rebalancer ; <nl> class Manager { <nl> <nl> private : / / used by caches <nl> / / register and unregister individual caches <nl> - std : : pair < bool , Manager : : MetadataItr > registerCache ( uint64_t requestedLimit , <nl> - uint64_t fixedSize ) ; <nl> - void unregisterCache ( Manager : : MetadataItr & metadata ) ; <nl> + std : : tuple < bool , Metadata , std : : shared_ptr < Table > > registerCache ( <nl> + uint64_t fixedSize , uint64_t maxSize ) ; <nl> + void unregisterCache ( std : : shared_ptr < Cache > cache ) ; <nl> <nl> / / allow 
individual caches to request changes to their allocations <nl> - std : : pair < bool , Manager : : time_point > requestResize ( <nl> - Manager : : MetadataItr & metadata , uint64_t requestedLimit ) ; <nl> + std : : pair < bool , Manager : : time_point > requestGrow ( <nl> + std : : shared_ptr < Cache > cache ) ; <nl> std : : pair < bool , Manager : : time_point > requestMigrate ( <nl> - Manager : : MetadataItr & metadata , uint32_t requestedLogSize ) ; <nl> + std : : shared_ptr < Cache > cache , uint32_t requestedLogSize ) ; <nl> <nl> / / stat reporting <nl> void reportAccess ( std : : shared_ptr < Cache > cache ) ; <nl> - void recordHitStat ( Manager : : Stat stat ) ; <nl> + void reportHitStat ( Stat stat ) ; <nl> <nl> private : / / used internally and by tasks <nl> + static constexpr double highwaterMultiplier = 0 . 8 ; <nl> + static const uint64_t minCacheAllocation ; <nl> + static const std : : chrono : : milliseconds rebalancingGracePeriod ; <nl> + <nl> / / check if shutdown or shutting down <nl> bool isOperational ( ) const ; <nl> / / check if there is already a global process running <nl> class Manager { <nl> void unprepareTask ( TaskEnvironment environment ) ; <nl> <nl> / / periodically run to rebalance allocations globally <nl> - bool rebalance ( ) ; <nl> + bool rebalance ( bool onlyCalculate = false ) ; <nl> <nl> / / helpers for global resizing <nl> - void internalResize ( uint64_t newGlobalLimit , bool firstAttempt ) ; <nl> - uint64_t resizeAllCaches ( TaskEnvironment environment , <nl> - std : : shared_ptr < PriorityList > cacheList , <nl> - bool noTasks , bool aggressive , uint64_t goal ) ; <nl> - uint64_t migrateAllCaches ( TaskEnvironment environment , <nl> - std : : shared_ptr < PriorityList > cacheList , <nl> - uint64_t goal ) ; <nl> + void shrinkOvergrownCaches ( TaskEnvironment environment ) ; <nl> void freeUnusedTables ( ) ; <nl> bool adjustGlobalLimitsIfAllowed ( uint64_t newGlobalLimit ) ; <nl> <nl> / / methods to adjust individual caches <nl> - void resizeCache ( TaskEnvironment environment , Manager : : MetadataItr & metadata , <nl> + void resizeCache ( TaskEnvironment environment , std : : shared_ptr < Cache > cache , <nl> uint64_t newLimit ) ; <nl> - void migrateCache ( TaskEnvironment environment , Manager : : MetadataItr & metadata , <nl> - uint32_t logSize ) ; <nl> - void leaseTable ( Manager : : MetadataItr & metadata , uint32_t logSize ) ; <nl> - void reclaimTables ( Manager : : MetadataItr & metadata , <nl> - bool auxiliaryOnly = false ) ; <nl> + void migrateCache ( TaskEnvironment environment , std : : shared_ptr < Cache > cache , <nl> + std : : shared_ptr < Table > table ) ; <nl> + std : : shared_ptr < Table > leaseTable ( uint32_t logSize ) ; <nl> + void reclaimTable ( std : : shared_ptr < Table > table , bool internal = false ) ; <nl> <nl> / / helpers for individual allocations <nl> - bool increaseAllowed ( uint64_t increase ) const ; <nl> - uint64_t tableSize ( uint32_t logSize ) const ; <nl> + bool increaseAllowed ( uint64_t increase , bool privileged = false ) const ; <nl> <nl> / / helper for lr - accessed heuristics <nl> std : : shared_ptr < PriorityList > priorityList ( ) ; <nl> <nl> / / helper for wait times <nl> Manager : : time_point futureTime ( uint64_t millisecondsFromNow ) ; <nl> + bool pastRebalancingGracePeriod ( ) const ; <nl> } ; <nl> <nl> } ; / / end namespace cache <nl> mmm a / arangod / Cache / ManagerTasks . cpp <nl> ppp b / arangod / Cache / ManagerTasks . 
cpp <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> FreeMemoryTask : : FreeMemoryTask ( Manager : : TaskEnvironment environment , <nl> - Manager * manager , Manager : : MetadataItr & metadata ) <nl> - : _environment ( environment ) , _manager ( manager ) { <nl> - metadata - > lock ( ) ; <nl> - _cache = metadata - > cache ( ) ; <nl> - metadata - > unlock ( ) ; <nl> - } <nl> + Manager * manager , std : : shared_ptr < Cache > cache ) <nl> + : _environment ( environment ) , _manager ( manager ) , _cache ( cache ) { } <nl> <nl> FreeMemoryTask : : ~ FreeMemoryTask ( ) { } <nl> <nl> void FreeMemoryTask : : run ( ) { <nl> <nl> if ( ran ) { <nl> _manager - > _state . lock ( ) ; <nl> - auto metadata = _cache - > metadata ( ) ; <nl> + Metadata * metadata = _cache - > metadata ( ) ; <nl> metadata - > lock ( ) ; <nl> - uint64_t reclaimed = metadata - > hardLimit ( ) - metadata - > softLimit ( ) ; <nl> - metadata - > adjustLimits ( metadata - > softLimit ( ) , metadata - > softLimit ( ) ) ; <nl> + uint64_t reclaimed = metadata - > hardUsageLimit - metadata - > softUsageLimit ; <nl> + metadata - > adjustLimits ( metadata - > softUsageLimit , metadata - > softUsageLimit ) ; <nl> metadata - > toggleFlag ( State : : Flag : : resizing ) ; <nl> metadata - > unlock ( ) ; <nl> _manager - > _globalAllocation - = reclaimed ; <nl> void FreeMemoryTask : : run ( ) { <nl> } <nl> <nl> MigrateTask : : MigrateTask ( Manager : : TaskEnvironment environment , Manager * manager , <nl> - Manager : : MetadataItr & metadata ) <nl> - : _environment ( environment ) , _manager ( manager ) { <nl> - metadata - > lock ( ) ; <nl> - _cache = metadata - > cache ( ) ; <nl> - metadata - > unlock ( ) ; <nl> - } <nl> + std : : shared_ptr < Cache > cache , <nl> + std : : shared_ptr < Table > table ) <nl> + : _environment ( environment ) , <nl> + _manager ( manager ) , <nl> + _cache ( cache ) , <nl> + _table ( table ) { } <nl> <nl> MigrateTask : : ~ MigrateTask ( ) { } <nl> <nl> bool MigrateTask : : dispatch ( ) { <nl> <nl> void MigrateTask : : run ( ) { <nl> / / do the actual migration <nl> - bool ran = _cache - > migrate ( ) ; <nl> + bool ran = _cache - > migrate ( _table ) ; <nl> <nl> - if ( ran ) { <nl> - _manager - > _state . lock ( ) ; <nl> - auto metadata = _cache - > metadata ( ) ; <nl> + if ( ! ran ) { <nl> + Metadata * metadata = _cache - > metadata ( ) ; <nl> metadata - > lock ( ) ; <nl> - _manager - > reclaimTables ( metadata , true ) ; <nl> metadata - > toggleFlag ( State : : Flag : : migrating ) ; <nl> metadata - > unlock ( ) ; <nl> - _manager - > _state . unlock ( ) ; <nl> + _manager - > reclaimTable ( _table ) ; <nl> } <nl> <nl> _manager - > unprepareTask ( _environment ) ; <nl> mmm a / arangod / Cache / ManagerTasks . h <nl> ppp b / arangod / Cache / ManagerTasks . 
h <nl> class FreeMemoryTask : public std : : enable_shared_from_this < FreeMemoryTask > { <nl> FreeMemoryTask & operator = ( FreeMemoryTask const & ) = delete ; <nl> <nl> FreeMemoryTask ( Manager : : TaskEnvironment environment , Manager * manager , <nl> - Manager : : MetadataItr & metadata ) ; <nl> + std : : shared_ptr < Cache > ) ; <nl> ~ FreeMemoryTask ( ) ; <nl> <nl> bool dispatch ( ) ; <nl> class MigrateTask : public std : : enable_shared_from_this < MigrateTask > { <nl> Manager : : TaskEnvironment _environment ; <nl> Manager * _manager ; <nl> std : : shared_ptr < Cache > _cache ; <nl> + std : : shared_ptr < Table > _table ; <nl> <nl> public : <nl> MigrateTask ( ) = delete ; <nl> class MigrateTask : public std : : enable_shared_from_this < MigrateTask > { <nl> MigrateTask & operator = ( MigrateTask const & ) = delete ; <nl> <nl> MigrateTask ( Manager : : TaskEnvironment environment , Manager * manager , <nl> - Manager : : MetadataItr & metadata ) ; <nl> + std : : shared_ptr < Cache > , std : : shared_ptr < Table > ) ; <nl> ~ MigrateTask ( ) ; <nl> <nl> bool dispatch ( ) ; <nl> mmm a / arangod / Cache / Metadata . cpp <nl> ppp b / arangod / Cache / Metadata . cpp <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " Cache / Metadata . h " <nl> + # include " Basics / Common . h " <nl> # include " Cache / Cache . h " <nl> + # include " Cache / Manager . h " <nl> # include " Cache / State . h " <nl> <nl> + # include < stdint . h > <nl> + # include < algorithm > <nl> # include < atomic > <nl> - # include < cstdint > <nl> + <nl> + # include < iostream > / / TODO <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - Metadata : : Metadata ( uint64_t limit ) <nl> - : _state ( ) , <nl> - _cache ( nullptr ) , <nl> - _usage ( 0 ) , <nl> - _softLimit ( limit ) , <nl> - _hardLimit ( limit ) , <nl> - _table ( nullptr ) , <nl> - _auxiliaryTable ( nullptr ) , <nl> - _logSize ( 0 ) , <nl> - _auxiliaryLogSize ( 0 ) { } <nl> + Metadata : : Metadata ( ) <nl> + : fixedSize ( 0 ) , <nl> + tableSize ( 0 ) , <nl> + maxSize ( 0 ) , <nl> + allocatedSize ( 0 ) , <nl> + deservedSize ( 0 ) , <nl> + usage ( 0 ) , <nl> + softUsageLimit ( 0 ) , <nl> + hardUsageLimit ( 0 ) , <nl> + _state ( ) { } <nl> + <nl> + Metadata : : Metadata ( uint64_t usageLimit , uint64_t fixed , uint64_t table , <nl> + uint64_t max ) <nl> + : fixedSize ( fixed ) , <nl> + tableSize ( table ) , <nl> + maxSize ( max ) , <nl> + allocatedSize ( usageLimit + fixed + table + Manager : : cacheRecordOverhead ) , <nl> + deservedSize ( allocatedSize ) , <nl> + usage ( 0 ) , <nl> + softUsageLimit ( usageLimit ) , <nl> + hardUsageLimit ( usageLimit ) , <nl> + _state ( ) { <nl> + TRI_ASSERT ( allocatedSize < = maxSize ) ; <nl> + } <nl> <nl> Metadata : : Metadata ( Metadata const & other ) <nl> - : _state ( other . _state ) , <nl> - _cache ( other . _cache ) , <nl> - _usage ( other . _usage ) , <nl> - _softLimit ( other . _softLimit ) , <nl> - _hardLimit ( other . _hardLimit ) , <nl> - _table ( other . _table ) , <nl> - _auxiliaryTable ( other . _auxiliaryTable ) , <nl> - _logSize ( other . _logSize ) , <nl> - _auxiliaryLogSize ( other . _auxiliaryLogSize ) { } <nl> - <nl> - void Metadata : : link ( std : : shared_ptr < Cache > cache ) { <nl> - lock ( ) ; <nl> - _cache = cache ; <nl> - unlock ( ) ; <nl> + : fixedSize ( other . fixedSize ) , <nl> + tableSize ( other . tableSize ) , <nl> + maxSize ( other . 
maxSize ) , <nl> + allocatedSize ( other . allocatedSize ) , <nl> + deservedSize ( other . deservedSize ) , <nl> + usage ( other . usage ) , <nl> + softUsageLimit ( other . softUsageLimit ) , <nl> + hardUsageLimit ( other . hardUsageLimit ) , <nl> + _state ( other . _state ) { } <nl> + <nl> + Metadata & Metadata : : operator = ( Metadata const & other ) { <nl> + if ( this ! = & other ) { <nl> + _state = other . _state ; <nl> + fixedSize = other . fixedSize ; <nl> + tableSize = other . tableSize ; <nl> + maxSize = other . maxSize ; <nl> + allocatedSize = other . allocatedSize ; <nl> + deservedSize = other . deservedSize ; <nl> + usage = other . usage ; <nl> + softUsageLimit = other . softUsageLimit ; <nl> + hardUsageLimit = other . hardUsageLimit ; <nl> + } <nl> + <nl> + return * this ; <nl> } <nl> <nl> void Metadata : : lock ( ) { _state . lock ( ) ; } <nl> void Metadata : : unlock ( ) { <nl> <nl> bool Metadata : : isLocked ( ) const { return _state . isLocked ( ) ; } <nl> <nl> - std : : shared_ptr < Cache > Metadata : : cache ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _cache ; <nl> - } <nl> - <nl> - uint32_t Metadata : : logSize ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _logSize ; <nl> - } <nl> - <nl> - uint32_t Metadata : : auxiliaryLogSize ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _auxiliaryLogSize ; <nl> - } <nl> - <nl> - uint8_t * Metadata : : table ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _table ; <nl> - } <nl> - <nl> - uint8_t * Metadata : : auxiliaryTable ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _auxiliaryTable ; <nl> - } <nl> - <nl> - uint64_t Metadata : : usage ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _usage ; <nl> - } <nl> - <nl> - uint64_t Metadata : : softLimit ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _softLimit ; <nl> - } <nl> - <nl> - uint64_t Metadata : : hardLimit ( ) const { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - return _hardLimit ; <nl> - } <nl> - <nl> bool Metadata : : adjustUsageIfAllowed ( int64_t usageChange ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> <nl> if ( usageChange < 0 ) { <nl> - _usage - = static_cast < uint64_t > ( - usageChange ) ; <nl> + usage - = static_cast < uint64_t > ( - usageChange ) ; <nl> return true ; <nl> } <nl> <nl> - if ( ( static_cast < uint64_t > ( usageChange ) + _usage < = _softLimit ) | | <nl> - ( ( _usage > _softLimit ) & & <nl> - ( static_cast < uint64_t > ( usageChange ) + _usage < = _hardLimit ) ) ) { <nl> - _usage + = static_cast < uint64_t > ( usageChange ) ; <nl> + if ( ( static_cast < uint64_t > ( usageChange ) + usage < = softUsageLimit ) | | <nl> + ( ( usage > softUsageLimit ) & & <nl> + ( static_cast < uint64_t > ( usageChange ) + usage < = hardUsageLimit ) ) ) { <nl> + usage + = static_cast < uint64_t > ( usageChange ) ; <nl> return true ; <nl> } <nl> <nl> bool Metadata : : adjustUsageIfAllowed ( int64_t usageChange ) { <nl> <nl> bool Metadata : : adjustLimits ( uint64_t softLimit , uint64_t hardLimit ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> + uint64_t fixed = tableSize + fixedSize + Manager : : cacheRecordOverhead ; <nl> + auto approve = [ & ] ( ) - > bool { <nl> + softUsageLimit = softLimit ; <nl> + hardUsageLimit = hardLimit ; <nl> + allocatedSize = hardUsageLimit + fixed ; <nl> + <nl> + return true ; <nl> + } ; <nl> <nl> - if ( hardLimit < _usage ) { <nl> - return false ; <nl> + / / special case : start shrink to minimum , ignore deserved / max ( table may be <nl> + / 
/ too big , should shrink during process ) <nl> + if ( ( softLimit = = Cache : : minSize ) & & hardLimit = = hardUsageLimit ) { <nl> + return approve ( ) ; <nl> } <nl> <nl> - _softLimit = softLimit ; <nl> - _hardLimit = hardLimit ; <nl> + / / special case : finalize shrinking case above <nl> + if ( ( softLimit = = Cache : : minSize ) & & ( hardLimit = = Cache : : minSize ) & & <nl> + ( usage < hardLimit ) ) { <nl> + return approve ( ) ; <nl> + } <nl> + <nl> + / / general case : start shrinking <nl> + if ( ( hardLimit = = hardUsageLimit ) & & ( softLimit < hardLimit ) & & <nl> + ( softLimit + fixed < = std : : min ( deservedSize , maxSize ) ) ) { <nl> + return approve ( ) ; <nl> + } <nl> + <nl> + / / general case : finish shrinking <nl> + if ( ( softLimit = = softUsageLimit ) & & ( softLimit = = hardLimit ) & & <nl> + ( usage < = hardLimit ) ) { <nl> + return approve ( ) ; <nl> + } <nl> <nl> - return true ; <nl> + / / general case : adjust both above usage but below deserved / maxSize <nl> + if ( ( softLimit = = hardLimit ) & & ( usage < = hardLimit ) & & <nl> + ( ( hardLimit + fixed ) < = std : : min ( deservedSize , maxSize ) ) ) { <nl> + return approve ( ) ; <nl> + } <nl> + <nl> + return false ; <nl> } <nl> <nl> - void Metadata : : grantAuxiliaryTable ( uint8_t * table , uint32_t logSize ) { <nl> + uint64_t Metadata : : adjustDeserved ( uint64_t deserved ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - _auxiliaryTable = table ; <nl> - _auxiliaryLogSize = logSize ; <nl> + deservedSize = std : : min ( deserved , maxSize ) ; <nl> + return deservedSize ; <nl> } <nl> <nl> - void Metadata : : swapTables ( ) { <nl> + uint64_t Metadata : : newLimit ( ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - std : : swap ( _table , _auxiliaryTable ) ; <nl> - std : : swap ( _logSize , _auxiliaryLogSize ) ; <nl> + uint64_t fixed = fixedSize + tableSize + Manager : : cacheRecordOverhead ; <nl> + return ( ( Cache : : minSize + fixed ) > = deservedSize ) <nl> + ? Cache : : minSize <nl> + : std : : min ( ( deservedSize - fixed ) , 4 * hardUsageLimit ) ; <nl> } <nl> <nl> - uint8_t * Metadata : : releaseTable ( ) { <nl> + bool Metadata : : migrationAllowed ( uint64_t newTableSize ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - uint8_t * table = _table ; <nl> - _table = nullptr ; <nl> - _logSize = 0 ; <nl> - return table ; <nl> + return ( hardUsageLimit + fixedSize + newTableSize + <nl> + Manager : : cacheRecordOverhead < = <nl> + std : : min ( deservedSize , maxSize ) ) ; <nl> } <nl> <nl> - uint8_t * Metadata : : releaseAuxiliaryTable ( ) { <nl> - TRI_ASSERT ( isLocked ( ) ) ; <nl> - uint8_t * table = _auxiliaryTable ; <nl> - _auxiliaryTable = nullptr ; <nl> - _auxiliaryLogSize = 0 ; <nl> - return table ; <nl> + void Metadata : : changeTable ( uint64_t newTableSize ) { <nl> + tableSize = newTableSize ; <nl> + allocatedSize = <nl> + hardUsageLimit + fixedSize + tableSize + Manager : : cacheRecordOverhead ; <nl> } <nl> <nl> bool Metadata : : isSet ( State : : Flag flag ) const { <nl> mmm a / arangod / Cache / Metadata . h <nl> ppp b / arangod / Cache / Metadata . h <nl> class Cache ; / / forward declaration <nl> / / / @ brief Metadata object to facilitate information sharing between individual <nl> / / / Cache instances and Manager . 
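A standalone sketch of the two general shrink phases approved by the adjustLimits() ladder in the Metadata.cpp hunk above: phase one lowers only the soft limit while the hard limit holds, and phase two drops the hard limit once eviction has drained usage. The special minSize cases and the allocatedSize bookkeeping are elided, and kMinSize, kOverhead, and all numeric values are illustrative assumptions, not values taken from Cache or Manager.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative stand-ins; the real constants live in Cache and Manager.
static constexpr std::uint64_t kOverhead = 80;  // assumption for Manager::cacheRecordOverhead

struct Meta {
  std::uint64_t fixedSize = 64, tableSize = 4096, maxSize = 1ULL << 20;
  std::uint64_t deservedSize = 1ULL << 16;
  std::uint64_t usage = 10000, softUsageLimit = 16384, hardUsageLimit = 16384;

  bool adjustLimits(std::uint64_t softLimit, std::uint64_t hardLimit) {
    std::uint64_t fixed = tableSize + fixedSize + kOverhead;
    auto approve = [&] {
      softUsageLimit = softLimit;
      hardUsageLimit = hardLimit;
      return true;
    };
    // phase 1: keep the hard limit, lower only the soft limit
    if (hardLimit == hardUsageLimit && softLimit < hardLimit &&
        softLimit + fixed <= std::min(deservedSize, maxSize)) {
      return approve();
    }
    // phase 2: once usage has drained below the target, drop the hard limit too
    if (softLimit == softUsageLimit && softLimit == hardLimit && usage <= hardLimit) {
      return approve();
    }
    return false;
  }
};

int main() {
  Meta m;
  std::cout << m.adjustLimits(8192, 16384) << '\n';  // 1: phase 1 starts the shrink
  std::cout << m.adjustLimits(8192, 8192) << '\n';   // 0: usage (10000) still above 8192
  m.usage = 5000;                                    // background eviction drains usage
  std::cout << m.adjustLimits(8192, 8192) << '\n';   // 1: phase 2 finishes the shrink
}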
<nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - class Metadata { <nl> - public : <nl> + struct Metadata { <nl> + / / info about allocated memory <nl> + uint64_t fixedSize ; <nl> + uint64_t tableSize ; <nl> + uint64_t maxSize ; <nl> + uint64_t allocatedSize ; <nl> + uint64_t deservedSize ; <nl> + <nl> + / / vital information about memory usage <nl> + uint64_t usage ; <nl> + uint64_t softUsageLimit ; <nl> + uint64_t hardUsageLimit ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Default constructor for placeholder objects . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + Metadata ( ) ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Initializes record with given information . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - Metadata ( uint64_t limit ) ; <nl> + Metadata ( uint64_t usage , uint64_t fixed , uint64_t table , uint64_t max ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Initializes record from an existing record . <nl> class Metadata { <nl> Metadata ( Metadata const & other ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Links the metadata object to an actual cache . <nl> + / / / @ brief Assigns the contents of an existing record to this one . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void link ( std : : shared_ptr < Cache > cache ) ; <nl> + Metadata & operator = ( Metadata const & other ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Locks the record . <nl> class Metadata { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> bool isLocked ( ) const ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Returns a shared pointer to the underlying cache . Requires record <nl> - / / / to be locked . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - std : : shared_ptr < Cache > cache ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Pointer to the table . Requires record to be locked .
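The four-argument constructor above fixes the accounting invariant allocatedSize = usageLimit + fixedSize + tableSize + overhead, with deservedSize starting equal to allocatedSize and a TRI_ASSERT keeping the result under maxSize. A quick check under assumed numbers; kOverhead stands in for Manager::cacheRecordOverhead.

#include <cassert>
#include <cstdint>
#include <iostream>

int main() {
  // illustrative inputs, not defaults from this commit
  const std::uint64_t usageLimit = 16384, fixed = 64, table = 4096;
  const std::uint64_t kOverhead = 80, maxSize = 1ULL << 20;

  // mirrors Metadata(usageLimit, fixed, table, max)
  const std::uint64_t allocatedSize = usageLimit + fixed + table + kOverhead;
  const std::uint64_t deservedSize = allocatedSize;

  assert(allocatedSize <= maxSize);  // the TRI_ASSERT in the constructor
  std::cout << allocatedSize << ' ' << deservedSize << '\n';  // 20624 20624
}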
<nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t * table ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief The base - 2 logarithm of the number of buckets in the table . <nl> - / / / Requires record to be locked . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint32_t logSize ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Pointer to the auxiliary table . Requires record to be locked . <nl> - / / / <nl> - / / / Will typically be nullptr . This will be set to a non - null value prior to <nl> - / / / migration . During migration , both tables will temporarily be in use . Upon <nl> - / / / completion of migration , the tables are swapped and the old table is <nl> - / / / released to the manager . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t * auxiliaryTable ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief The base - 2 logarithm of the number of buckets in the auxiliary <nl> - / / / table . Requires record to be locked . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint32_t auxiliaryLogSize ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief The current memory usage of the cache in bytes . Requires record to <nl> - / / / be locked . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t usage ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief The soft usage limit for this cache . Requires record to be locked . <nl> - / / / <nl> - / / / Typically , this will be equal to the hard limit . It may be lower when the <nl> - / / / cache is resizing . If the current usage is below the soft limit , then new <nl> - / / / insertions are not allowed to exceed the soft limit . If the current usage <nl> - / / / is above the soft limit , then new insertions may occur as long as they do <nl> - / / / not exceed the hard limit ; a background task will be working in parallel <nl> - / / / to remove older values to bring usage below the soft limit . 
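The removed comment above remains the clearest description of the usage policy, now enforced by adjustUsageIfAllowed() in the Metadata.cpp hunk: decreases always pass, increases pass while staying under the soft limit, or under the hard limit if usage has already overshot the soft limit because a shrink is in progress. A minimal standalone sketch of that rule under assumed limits:

#include <cstdint>
#include <iostream>

static std::uint64_t usage = 9000;
static const std::uint64_t softLimit = 8192, hardLimit = 16384;

// Mirrors Metadata::adjustUsageIfAllowed with the locking stripped out.
bool adjustUsageIfAllowed(std::int64_t change) {
  if (change < 0) {
    usage -= static_cast<std::uint64_t>(-change);
    return true;
  }
  std::uint64_t next = usage + static_cast<std::uint64_t>(change);
  if (next <= softLimit || (usage > softLimit && next <= hardLimit)) {
    usage = next;
    return true;
  }
  return false;
}

int main() {
  std::cout << adjustUsageIfAllowed(4096) << '\n';   // 1: already above soft, 13096 <= hard
  std::cout << adjustUsageIfAllowed(8000) << '\n';   // 0: 21096 would exceed the hard limit
  std::cout << adjustUsageIfAllowed(-6000) << '\n';  // 1: decreases are always allowed
}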
<nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t softLimit ( ) const ; <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief The hard usage limit for this cache . Requires record to be locked . <nl> - / / / <nl> - / / / Usage is guaranteed to remain under this value at all times . <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint64_t hardLimit ( ) const ; <nl> - <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Adjusts usage by the specified amount if it will not violate <nl> / / / limits . Requires record to be locked . <nl> class Metadata { <nl> bool adjustLimits ( uint64_t softLimit , uint64_t hardLimit ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Lets the manager grant a new table lease to the cache for <nl> - / / / migration . Requires record to be locked . <nl> + / / / @ brief Sets the deserved size . Requires record to be locked . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void grantAuxiliaryTable ( uint8_t * table , uint32_t logSize ) ; <nl> + uint64_t adjustDeserved ( uint64_t deserved ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Swap the main and auxiliary tables ( both pointers and sizes ) . <nl> - / / / Requires record to be locked . <nl> + / / / @ brief Calculates the new usage limit based on deserved size and other <nl> + / / / values . Requires record to be locked . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - void swapTables ( ) ; <nl> + uint64_t newLimit ( ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Release the main table back to the manager . Requires record to be <nl> - / / / locked . <nl> + / / / @ brief Checks feasibility of new table size prior to migration . Requires <nl> + / / / record to be locked . <nl> + / / / <nl> + / / / If migrating to table of new size would exceed either deserved or maximum <nl> + / / / size , then returns false . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t * releaseTable ( ) ; <nl> + bool migrationAllowed ( uint64_t newTableSize ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief Release the auxiliary table back to the manager . Requires record to <nl> - / / / be locked . 
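A worked example of the two sizing helpers introduced in this header, reproducing the formulas from the Metadata.cpp hunk above; kMinSize, kOverhead, and the other numbers are assumptions for illustration.

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  // illustrative stand-ins for Cache::minSize and Manager::cacheRecordOverhead
  const std::uint64_t kMinSize = 1024, kOverhead = 80;
  const std::uint64_t fixedSize = 64, tableSize = 4096, maxSize = 1ULL << 20;
  const std::uint64_t deservedSize = 32768, hardUsageLimit = 16384;

  const std::uint64_t fixed = fixedSize + tableSize + kOverhead;  // 4240

  // newLimit(): fall back to the minimum if even that cannot be covered, else
  // grant what is deserved beyond the fixed costs, capped at 4x the old limit
  std::uint64_t limit = (kMinSize + fixed >= deservedSize)
                            ? kMinSize
                            : std::min(deservedSize - fixed, 4 * hardUsageLimit);
  std::cout << limit << '\n';  // 28528 = 32768 - 4240

  // migrationAllowed(newTableSize): the post-migration footprint must fit
  const std::uint64_t newTableSize = 8192;
  bool allowed = hardUsageLimit + fixedSize + newTableSize + kOverhead <=
                 std::min(deservedSize, maxSize);
  std::cout << allowed << '\n';  // 1: 24720 <= 32768
}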
<nl> + / / / @ brief Sets the table size after migration . Requires record to be locked . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - uint8_t * releaseAuxiliaryTable ( ) ; <nl> + void changeTable ( uint64_t newTableSize ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Checks if flag is set in state . Requires record to be locked . <nl> class Metadata { <nl> <nl> private : <nl> State _state ; <nl> - <nl> - / / pointer to underlying cache <nl> - std : : shared_ptr < Cache > _cache ; <nl> - <nl> - / / vital information about memory usage <nl> - uint64_t _usage ; <nl> - uint64_t _softLimit ; <nl> - uint64_t _hardLimit ; <nl> - <nl> - / / information about table leases <nl> - uint8_t * _table ; <nl> - uint8_t * _auxiliaryTable ; <nl> - uint32_t _logSize ; <nl> - uint32_t _auxiliaryLogSize ; <nl> } ; <nl> <nl> } ; / / end namespace cache <nl> mmm a / arangod / Cache / PlainBucket . cpp <nl> ppp b / arangod / Cache / PlainBucket . cpp <nl> <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - size_t PlainBucket : : SLOTS_DATA = 5 ; <nl> - <nl> PlainBucket : : PlainBucket ( ) { <nl> _state . lock ( ) ; <nl> clear ( ) ; <nl> bool PlainBucket : : isMigrated ( ) const { <nl> bool PlainBucket : : isFull ( ) const { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> bool hasEmptySlot = false ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t slot = SLOTS_DATA - ( i + 1 ) ; <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> if ( _cachedHashes [ slot ] = = 0 ) { <nl> hasEmptySlot = true ; <nl> break ; <nl> CachedValue * PlainBucket : : find ( uint32_t hash , void const * key , uint32_t keySize , <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> CachedValue * result = nullptr ; <nl> <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> if ( _cachedHashes [ i ] = = 0 ) { <nl> break ; <nl> } <nl> CachedValue * PlainBucket : : find ( uint32_t hash , void const * key , uint32_t keySize , <nl> / / requires there to be an open slot , otherwise will not be inserted <nl> void PlainBucket : : insert ( uint32_t hash , CachedValue * value ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> if ( _cachedHashes [ i ] = = 0 ) { <nl> / / found an empty slot <nl> _cachedHashes [ i ] = hash ; <nl> CachedValue * PlainBucket : : remove ( uint32_t hash , void const * key , <nl> <nl> CachedValue * PlainBucket : : evictionCandidate ( bool ignoreRefCount ) const { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t slot = SLOTS_DATA - ( i + 1 ) ; <nl> - if ( _cachedHashes [ slot ] = = 0 ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> + if ( _cachedData [ slot ] = = nullptr ) { <nl> continue ; <nl> } <nl> if ( ignoreRefCount | | _cachedData [ slot ] - > isFreeable ( ) ) { <nl> CachedValue * PlainBucket : : evictionCandidate ( bool ignoreRefCount ) const { <nl> <nl> void PlainBucket : : evict ( CachedValue * value , bool optimizeForInsertion ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t 
slot = SLOTS_DATA - ( i + 1 ) ; <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> if ( _cachedData [ slot ] = = value ) { <nl> / / found a match <nl> _cachedHashes [ slot ] = 0 ; <nl> void PlainBucket : : moveSlot ( size_t slot , bool moveToFront ) { <nl> } <nl> } else { <nl> / / move slot to back <nl> - for ( ; ( i < SLOTS_DATA - 1 ) & & ( _cachedHashes [ i + 1 ] ! = 0 ) ; i + + ) { <nl> + for ( ; ( i < slotsData - 1 ) & & ( _cachedHashes [ i + 1 ] ! = 0 ) ; i + + ) { <nl> _cachedHashes [ i ] = _cachedHashes [ i + 1 ] ; <nl> _cachedData [ i ] = _cachedData [ i + 1 ] ; <nl> } <nl> mmm a / arangod / Cache / PlainBucket . h <nl> ppp b / arangod / Cache / PlainBucket . h <nl> <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / State . h " <nl> <nl> # include < stdint . h > <nl> namespace cache { <nl> / / / synchronization . Data entries are carefully laid out to ensure the structure <nl> / / / fits in a single cacheline . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - struct alignas ( 64 ) PlainBucket { <nl> + struct alignas ( BUCKET_SIZE ) PlainBucket { <nl> State _state ; <nl> <nl> / / actual cached entries <nl> - uint32_t _cachedHashes [ 5 ] ; <nl> - CachedValue * _cachedData [ 5 ] ; <nl> - static size_t SLOTS_DATA ; <nl> + static constexpr size_t slotsData = 5 ; <nl> + uint32_t _cachedHashes [ slotsData ] ; <nl> + CachedValue * _cachedData [ slotsData ] ; <nl> <nl> / / padding , if necessary ? <nl> # ifdef TRI_PADDING_32 <nl> struct alignas ( 64 ) PlainBucket { <nl> void moveSlot ( size_t slot , bool moveToFront ) ; <nl> } ; <nl> <nl> + / / ensure that PlainBucket is exactly BUCKET_SIZE <nl> + static_assert ( sizeof ( PlainBucket ) = = BUCKET_SIZE , <nl> + " Expected sizeof ( PlainBucket ) = = BUCKET_SIZE . " ) ; <nl> + <nl> } ; / / end namespace cache <nl> } ; / / end namespace arangodb <nl> <nl> mmm a / arangod / Cache / PlainCache . cpp <nl> ppp b / arangod / Cache / PlainCache . cpp <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / Cache . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / PlainBucket . h " <nl> # include " Cache / State . h " <nl> - # include " Random / RandomGenerator . h " <nl> + # include " Cache / Table . h " <nl> <nl> # include < stdint . h > <nl> # include < atomic > <nl> <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - static constexpr int64_t TRIES_FAST = 50LL ; <nl> - static constexpr int64_t TRIES_SLOW = 10000LL ; <nl> - static constexpr int64_t TRIES_GUARANTEE = - 1LL ; <nl> - <nl> Cache : : Finding PlainCache : : find ( void const * key , uint32_t keySize ) { <nl> TRI_ASSERT ( key ! = nullptr ) ; <nl> Finding result ( nullptr ) ; <nl> Cache : : Finding PlainCache : : find ( void const * key , uint32_t keySize ) { <nl> <nl> bool ok ; <nl> PlainBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_FAST ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast ) ; <nl> <nl> if ( ok ) { <nl> result . 
reset ( bucket - > find ( hash , key , keySize ) ) ; <nl> bool PlainCache : : insert ( CachedValue * value ) { <nl> <nl> bool ok ; <nl> PlainBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_FAST ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast ) ; <nl> <nl> if ( ok ) { <nl> bool allowed = true ; <nl> bool eviction = false ; <nl> - int64_t change = value - > size ( ) ; <nl> + bool maybeMigrate = false ; <nl> + int64_t change = static_cast < int64_t > ( value - > size ( ) ) ; <nl> CachedValue * candidate = bucket - > find ( hash , value - > key ( ) , value - > keySize ) ; <nl> <nl> if ( candidate = = nullptr & & bucket - > isFull ( ) ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> if ( candidate = = nullptr ) { <nl> allowed = false ; <nl> - } else { <nl> - eviction = true ; <nl> } <nl> } <nl> <nl> if ( allowed ) { <nl> if ( candidate ! = nullptr ) { <nl> - change - = candidate - > size ( ) ; <nl> + change - = static_cast < int64_t > ( candidate - > size ( ) ) ; <nl> } <nl> <nl> - _metadata - > lock ( ) ; <nl> - allowed = _metadata - > adjustUsageIfAllowed ( change ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . lock ( ) ; <nl> + allowed = _metadata . adjustUsageIfAllowed ( change ) ; <nl> + _metadata . unlock ( ) ; <nl> <nl> if ( allowed ) { <nl> if ( candidate ! = nullptr ) { <nl> bucket - > evict ( candidate , true ) ; <nl> freeValue ( candidate ) ; <nl> + eviction = true ; <nl> } <nl> - recordStat ( eviction ? Stat : : insertEviction : Stat : : insertNoEviction ) ; <nl> bucket - > insert ( hash , value ) ; <nl> inserted = true ; <nl> + if ( ! eviction ) { <nl> + maybeMigrate = source - > slotFilled ( ) ; <nl> + } <nl> } else { <nl> - requestResize ( ) ; / / let function do the hard work <nl> + requestGrow ( ) ; / / let function do the hard work <nl> } <nl> } <nl> <nl> bucket - > unlock ( ) ; <nl> - if ( inserted ) { <nl> - requestMigrate ( ) ; / / let function do the hard work <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; / / let function do the hard work <nl> } <nl> endOperation ( ) ; <nl> } <nl> bool PlainCache : : insert ( CachedValue * value ) { <nl> bool PlainCache : : remove ( void const * key , uint32_t keySize ) { <nl> TRI_ASSERT ( key ! = nullptr ) ; <nl> bool removed = false ; <nl> + bool maybeMigrate = false ; <nl> uint32_t hash = hashKey ( key , keySize ) ; <nl> <nl> bool ok ; <nl> PlainBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_SLOW ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesSlow ) ; <nl> <nl> if ( ok ) { <nl> CachedValue * candidate = bucket - > remove ( hash , key , keySize ) ; <nl> bool PlainCache : : remove ( void const * key , uint32_t keySize ) { <nl> if ( candidate ! = nullptr ) { <nl> int64_t change = - static_cast < int64_t > ( candidate - > size ( ) ) ; <nl> <nl> - _metadata - > lock ( ) ; <nl> - bool allowed = _metadata - > adjustUsageIfAllowed ( change ) ; <nl> + _metadata . lock ( ) ; <nl> + bool allowed = _metadata . adjustUsageIfAllowed ( change ) ; <nl> TRI_ASSERT ( allowed ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . 
unlock ( ) ; <nl> <nl> freeValue ( candidate ) ; <nl> + maybeMigrate = source - > slotEmptied ( ) ; <nl> } <nl> <nl> removed = true ; <nl> bucket - > unlock ( ) ; <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; <nl> + } <nl> endOperation ( ) ; <nl> } <nl> <nl> bool PlainCache : : blacklist ( void const * key , uint32_t keySize ) { return false ; } <nl> <nl> uint64_t PlainCache : : allocationSize ( bool enableWindowedStats ) { <nl> return sizeof ( PlainCache ) + <nl> - StatBuffer : : allocationSize ( _evictionStatsCapacity ) + <nl> ( enableWindowedStats ? ( sizeof ( StatBuffer ) + <nl> StatBuffer : : allocationSize ( _findStatsCapacity ) ) <nl> : 0 ) ; <nl> } <nl> <nl> - std : : shared_ptr < Cache > PlainCache : : create ( Manager * manager , <nl> - Manager : : MetadataItr metadata , <nl> - bool allowGrowth , <nl> + std : : shared_ptr < Cache > PlainCache : : create ( Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) { <nl> return std : : make_shared < PlainCache > ( Cache : : ConstructionGuard ( ) , manager , <nl> - metadata , allowGrowth , <nl> - enableWindowedStats ) ; <nl> + metadata , table , enableWindowedStats ) ; <nl> } <nl> <nl> PlainCache : : PlainCache ( Cache : : ConstructionGuard guard , Manager * manager , <nl> - Manager : : MetadataItr metadata , bool allowGrowth , <nl> + Metadata metadata , std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) <nl> - : Cache ( guard , manager , metadata , allowGrowth , enableWindowedStats ) , <nl> - _table ( nullptr ) , <nl> - _logSize ( 0 ) , <nl> - _tableSize ( 1 ) , <nl> - _maskShift ( 32 ) , <nl> - _bucketMask ( 0 ) , <nl> - _auxiliaryTable ( nullptr ) , <nl> - _auxiliaryLogSize ( 0 ) , <nl> - _auxiliaryTableSize ( 1 ) , <nl> - _auxiliaryMaskShift ( 32 ) , <nl> - _auxiliaryBucketMask ( 0 ) { <nl> - _state . lock ( ) ; <nl> - if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - _table = reinterpret_cast < PlainBucket * > ( _metadata - > table ( ) ) ; <nl> - _logSize = _metadata - > logSize ( ) ; <nl> - _tableSize = ( 1ULL < < _logSize ) ; <nl> - _maskShift = 32 - _logSize ; <nl> - _bucketMask = ( _tableSize - 1 ) < < _maskShift ; <nl> - _metadata - > unlock ( ) ; <nl> - } <nl> - _state . unlock ( ) ; <nl> - } <nl> + : Cache ( guard , manager , metadata , table , enableWindowedStats , <nl> + PlainCache : : bucketClearer , PlainBucket : : slotsData ) { } <nl> <nl> PlainCache : : ~ PlainCache ( ) { <nl> _state . lock ( ) ; <nl> PlainCache : : ~ PlainCache ( ) { <nl> } <nl> } <nl> <nl> - bool PlainCache : : freeMemory ( ) { <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return false ; <nl> - } <nl> - startOperation ( ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - bool underLimit = reclaimMemory ( 0ULL ) ; <nl> - uint64_t failures = 0 ; <nl> - while ( ! 
underLimit ) { <nl> - / / pick a random bucket <nl> - uint32_t randomHash = RandomGenerator : : interval ( UINT32_MAX ) ; <nl> - bool ok ; <nl> - PlainBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( randomHash , TRIES_FAST , false ) ; <nl> - <nl> - if ( ok ) { <nl> - failures = 0 ; <nl> - / / evict LRU freeable value if exists <nl> - CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> + uint64_t PlainCache : : freeMemoryFrom ( uint32_t hash ) { <nl> + uint64_t reclaimed = 0 ; <nl> + bool ok ; <nl> + bool maybeMigrate = false ; <nl> + PlainBucket * bucket ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast , false ) ; <nl> <nl> - if ( candidate ! = nullptr ) { <nl> - uint64_t size = candidate - > size ( ) ; <nl> - bucket - > evict ( candidate ) ; <nl> - freeValue ( candidate ) ; <nl> + if ( ok ) { <nl> + / / evict LRU freeable value if exists <nl> + CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> <nl> - underLimit = reclaimMemory ( size ) ; <nl> - } <nl> + if ( candidate ! = nullptr ) { <nl> + reclaimed = candidate - > size ( ) ; <nl> + bucket - > evict ( candidate ) ; <nl> + freeValue ( candidate ) ; <nl> + maybeMigrate = source - > slotEmptied ( ) ; <nl> + } <nl> <nl> - bucket - > unlock ( ) ; <nl> - } else { <nl> - failures + + ; <nl> - if ( failures > 100 ) { <nl> - _state . lock ( ) ; <nl> - bool shouldQuit = ! isOperational ( ) ; <nl> - _state . unlock ( ) ; <nl> + bucket - > unlock ( ) ; <nl> + } <nl> <nl> - if ( shouldQuit ) { <nl> - break ; <nl> - } else { <nl> - failures = 0 ; <nl> - } <nl> - } <nl> - } <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; <nl> } <nl> <nl> - endOperation ( ) ; <nl> - return true ; <nl> + return reclaimed ; <nl> } <nl> <nl> - bool PlainCache : : migrate ( ) { <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return false ; <nl> - } <nl> - startOperation ( ) ; <nl> - _metadata - > lock ( ) ; <nl> - if ( _metadata - > table ( ) = = nullptr | | _metadata - > auxiliaryTable ( ) = = nullptr ) { <nl> - _metadata - > unlock ( ) ; <nl> - _state . unlock ( ) ; <nl> - endOperation ( ) ; <nl> - return false ; <nl> - } <nl> - _auxiliaryTable = reinterpret_cast < PlainBucket * > ( _metadata - > auxiliaryTable ( ) ) ; <nl> - _auxiliaryLogSize = _metadata - > auxiliaryLogSize ( ) ; <nl> - _auxiliaryTableSize = ( 1ULL < < _auxiliaryLogSize ) ; <nl> - _auxiliaryMaskShift = ( 32 - _auxiliaryLogSize ) ; <nl> - _auxiliaryBucketMask = ( _auxiliaryTableSize - 1 ) < < _auxiliaryMaskShift ; <nl> - _metadata - > unlock ( ) ; <nl> - _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - for ( uint32_t i = 0 ; i < _tableSize ; i + + ) { <nl> - / / lock current bucket <nl> - PlainBucket * bucket = & ( _table [ i ] ) ; <nl> - bucket - > lock ( - 1LL ) ; <nl> - <nl> - / / collect target bucket ( s ) <nl> - std : : vector < PlainBucket * > targets ; <nl> - if ( _logSize > _auxiliaryLogSize ) { <nl> - uint32_t targetIndex = ( i < < _maskShift ) > > _auxiliaryMaskShift ; <nl> - targets . emplace_back ( & ( _auxiliaryTable [ targetIndex ] ) ) ; <nl> - } else { <nl> - uint32_t baseIndex = ( i < < _maskShift ) > > _auxiliaryMaskShift ; <nl> - for ( size_t j = 0 ; j < ( 1U < < ( _auxiliaryLogSize - _logSize ) ) ; j + + ) { <nl> - uint32_t targetIndex = baseIndex + j ; <nl> - targets . 
emplace_back ( & ( _auxiliaryTable [ targetIndex ] ) ) ; <nl> - } <nl> - } <nl> - / / lock target bucket ( s ) <nl> - for ( PlainBucket * targetBucket : targets ) { <nl> - targetBucket - > lock ( TRIES_GUARANTEE ) ; <nl> - } <nl> - <nl> - for ( size_t j = 0 ; j < PlainBucket : : SLOTS_DATA ; j + + ) { <nl> - size_t k = PlainBucket : : SLOTS_DATA - ( j + 1 ) ; <nl> - if ( ( * bucket ) . _cachedHashes [ k ] ! = 0 ) { <nl> - uint32_t hash = bucket - > _cachedHashes [ k ] ; <nl> - CachedValue * value = bucket - > _cachedData [ k ] ; <nl> - <nl> - uint32_t targetIndex = <nl> - ( hash & _auxiliaryBucketMask ) > > _auxiliaryMaskShift ; <nl> - PlainBucket * targetBucket = & ( _auxiliaryTable [ targetIndex ] ) ; <nl> - bool haveSpace = true ; <nl> - if ( targetBucket - > isFull ( ) ) { <nl> - CachedValue * candidate = targetBucket - > evictionCandidate ( ) ; <nl> - if ( candidate ! = nullptr ) { <nl> - targetBucket - > evict ( candidate , true ) ; <nl> - uint64_t size = candidate - > size ( ) ; <nl> - freeValue ( candidate ) ; <nl> - reclaimMemory ( size ) ; <nl> - } else { <nl> - haveSpace = false ; <nl> - } <nl> - } <nl> - if ( haveSpace ) { <nl> - targetBucket - > insert ( hash , value ) ; <nl> - } else { <nl> - uint64_t size = value - > size ( ) ; <nl> - freeValue ( value ) ; <nl> + void PlainCache : : migrateBucket ( void * sourcePtr , <nl> + std : : unique_ptr < Table : : Subtable > targets , <nl> + std : : shared_ptr < Table > newTable ) { <nl> + / / lock current bucket <nl> + auto source = reinterpret_cast < PlainBucket * > ( sourcePtr ) ; <nl> + source - > lock ( Cache : : triesGuarantee ) ; <nl> + <nl> + / / lock target bucket ( s ) <nl> + targets - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + auto targetBucket = reinterpret_cast < PlainBucket * > ( ptr ) ; <nl> + return targetBucket - > lock ( Cache : : triesGuarantee ) ; <nl> + } ) ; <nl> + <nl> + for ( size_t j = 0 ; j < PlainBucket : : slotsData ; j + + ) { <nl> + size_t k = PlainBucket : : slotsData - ( j + 1 ) ; <nl> + if ( source - > _cachedHashes [ k ] ! = 0 ) { <nl> + uint32_t hash = source - > _cachedHashes [ k ] ; <nl> + CachedValue * value = source - > _cachedData [ k ] ; <nl> + <nl> + auto targetBucket = <nl> + reinterpret_cast < PlainBucket * > ( targets - > fetchBucket ( hash ) ) ; <nl> + bool haveSpace = true ; <nl> + if ( targetBucket - > isFull ( ) ) { <nl> + CachedValue * candidate = targetBucket - > evictionCandidate ( ) ; <nl> + if ( candidate ! = nullptr ) { <nl> + targetBucket - > evict ( candidate , true ) ; <nl> + uint64_t size = candidate - > size ( ) ; <nl> + freeValue ( candidate ) ; <nl> reclaimMemory ( size ) ; <nl> + newTable - > slotEmptied ( ) ; <nl> + } else { <nl> + haveSpace = false ; <nl> } <nl> - <nl> - bucket - > _cachedHashes [ k ] = 0 ; <nl> - bucket - > _cachedData [ k ] = nullptr ; <nl> } <nl> - } <nl> + if ( haveSpace ) { <nl> + targetBucket - > insert ( hash , value ) ; <nl> + newTable - > slotFilled ( ) ; <nl> + } else { <nl> + uint64_t size = value - > size ( ) ; <nl> + freeValue ( value ) ; <nl> + reclaimMemory ( size ) ; <nl> + } <nl> <nl> - / / unlock targets <nl> - for ( PlainBucket * targetBucket : targets ) { <nl> - targetBucket - > unlock ( ) ; <nl> + source - > _cachedHashes [ k ] = 0 ; <nl> + source - > _cachedData [ k ] = nullptr ; <nl> } <nl> - <nl> - / / finish up this bucket ' s migration <nl> - bucket - > _state . 
toggleFlag ( State : : Flag : : migrated ) ; <nl> - bucket - > unlock ( ) ; <nl> } <nl> <nl> - / / swap tables and unmark local migrating flag <nl> - _state . lock ( ) ; <nl> - std : : swap ( _table , _auxiliaryTable ) ; <nl> - std : : swap ( _logSize , _auxiliaryLogSize ) ; <nl> - std : : swap ( _tableSize , _auxiliaryTableSize ) ; <nl> - std : : swap ( _maskShift , _auxiliaryMaskShift ) ; <nl> - std : : swap ( _bucketMask , _auxiliaryBucketMask ) ; <nl> - _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - / / clear out old table <nl> - clearTable ( _auxiliaryTable , _auxiliaryTableSize ) ; <nl> - <nl> - / / release references to old table <nl> - _state . lock ( ) ; <nl> - _auxiliaryTable = nullptr ; <nl> - _auxiliaryLogSize = 0 ; <nl> - _auxiliaryTableSize = 1 ; <nl> - _auxiliaryMaskShift = 32 ; <nl> - _auxiliaryBucketMask = 0 ; <nl> - _state . unlock ( ) ; <nl> - <nl> - / / swap table in metadata <nl> - _metadata - > lock ( ) ; <nl> - _metadata - > swapTables ( ) ; <nl> - _metadata - > unlock ( ) ; <nl> - <nl> - endOperation ( ) ; <nl> - return true ; <nl> - } <nl> + / / unlock targets <nl> + targets - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + auto targetBucket = reinterpret_cast < PlainBucket * > ( ptr ) ; <nl> + targetBucket - > unlock ( ) ; <nl> + return true ; <nl> + } ) ; <nl> <nl> - void PlainCache : : clearTables ( ) { <nl> - if ( _table ! = nullptr ) { <nl> - clearTable ( _table , _tableSize ) ; <nl> - } <nl> - if ( _auxiliaryTable ! = nullptr ) { <nl> - clearTable ( _auxiliaryTable , _auxiliaryTableSize ) ; <nl> - } <nl> + / / finish up this bucket ' s migration <nl> + source - > _state . toggleFlag ( State : : Flag : : migrated ) ; <nl> + source - > unlock ( ) ; <nl> } <nl> <nl> - std : : pair < bool , PlainBucket * > PlainCache : : getBucket ( uint32_t hash , <nl> - int64_t maxTries , <nl> - bool singleOperation ) { <nl> + std : : tuple < bool , PlainBucket * , std : : shared_ptr < Table > > PlainCache : : getBucket ( <nl> + uint32_t hash , int64_t maxTries , bool singleOperation ) { <nl> PlainBucket * bucket = nullptr ; <nl> + std : : shared_ptr < Table > source ( nullptr ) ; <nl> <nl> bool ok = _state . lock ( maxTries ) ; <nl> if ( ok ) { <nl> std : : pair < bool , PlainBucket * > PlainCache : : getBucket ( uint32_t hash , <nl> if ( singleOperation ) { <nl> startOperation ( ) ; <nl> started = true ; <nl> - _metadata - > lock ( ) ; <nl> - _manager - > reportAccess ( _metadata - > cache ( ) ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _manager - > reportAccess ( shared_from_this ( ) ) ; <nl> } <nl> <nl> - bucket = & ( _table [ getIndex ( hash , false ) ] ) ; <nl> - ok = bucket - > lock ( maxTries ) ; <nl> - if ( ok & & <nl> - bucket - > isMigrated ( ) ) { / / get bucket from auxiliary table instead <nl> - bucket - > unlock ( ) ; <nl> - bucket = & ( _auxiliaryTable [ getIndex ( hash , true ) ] ) ; <nl> - ok = bucket - > lock ( maxTries ) ; <nl> - if ( ok & & bucket - > isMigrated ( ) ) { <nl> - ok = false ; <nl> - bucket - > unlock ( ) ; <nl> - } <nl> - } <nl> + auto pair = _table - > fetchAndLockBucket ( hash , maxTries ) ; <nl> + bucket = reinterpret_cast < PlainBucket * > ( pair . first ) ; <nl> + source = pair . second ; <nl> + ok = ( bucket ! = nullptr ) ; <nl> } <nl> if ( ! ok & & started ) { <nl> endOperation ( ) ; <nl> std : : pair < bool , PlainBucket * > PlainCache : : getBucket ( uint32_t hash , <nl> _state . 
unlock ( ) ; <nl> } <nl> <nl> - return std : : pair < bool , PlainBucket * > ( ok , bucket ) ; <nl> + return std : : make_tuple ( ok , bucket , source ) ; <nl> } <nl> <nl> - void PlainCache : : clearTable ( PlainBucket * table , uint64_t tableSize ) { <nl> - for ( uint64_t i = 0 ; i < tableSize ; i + + ) { <nl> - PlainBucket * bucket = & ( table [ i ] ) ; <nl> - bucket - > lock ( - 1LL ) ; <nl> - for ( size_t j = 0 ; j < PlainBucket : : SLOTS_DATA ; j + + ) { <nl> + Table : : BucketClearer PlainCache : : bucketClearer ( Metadata * metadata ) { <nl> + return [ metadata ] ( void * ptr ) - > void { <nl> + auto bucket = reinterpret_cast < PlainBucket * > ( ptr ) ; <nl> + bucket - > lock ( Cache : : triesGuarantee ) ; <nl> + for ( size_t j = 0 ; j < PlainBucket : : slotsData ; j + + ) { <nl> if ( bucket - > _cachedData [ j ] ! = nullptr ) { <nl> uint64_t size = bucket - > _cachedData [ j ] - > size ( ) ; <nl> freeValue ( bucket - > _cachedData [ j ] ) ; <nl> - reclaimMemory ( size ) ; <nl> + metadata - > lock ( ) ; <nl> + metadata - > adjustUsageIfAllowed ( - static_cast < int64_t > ( size ) ) ; <nl> + metadata - > unlock ( ) ; <nl> } <nl> } <nl> bucket - > clear ( ) ; <nl> - } <nl> - } <nl> - <nl> - uint32_t PlainCache : : getIndex ( uint32_t hash , bool useAuxiliary ) const { <nl> - if ( useAuxiliary ) { <nl> - return ( ( hash & _auxiliaryBucketMask ) > > _auxiliaryMaskShift ) ; <nl> - } <nl> - <nl> - return ( ( hash & _bucketMask ) > > _maskShift ) ; <nl> + } ; <nl> } <nl> mmm a / arangod / Cache / PlainCache . h <nl> ppp b / arangod / Cache / PlainCache . h <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / Cache . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / ManagerTasks . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / PlainBucket . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> <nl> # include < stdint . 
h > <nl> # include < atomic > <nl> namespace cache { <nl> class PlainCache final : public Cache { <nl> public : <nl> PlainCache ( Cache : : ConstructionGuard guard , Manager * manager , <nl> - Manager : : MetadataItr metadata , bool allowGrowth , <nl> + Metadata metadata , std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) ; <nl> ~ PlainCache ( ) ; <nl> <nl> class PlainCache final : public Cache { <nl> bool blacklist ( void const * key , uint32_t keySize ) ; <nl> <nl> private : <nl> - / / main table info <nl> - PlainBucket * _table ; <nl> - uint32_t _logSize ; <nl> - uint64_t _tableSize ; <nl> - uint32_t _maskShift ; <nl> - uint32_t _bucketMask ; <nl> - <nl> - / / auxiliary table info <nl> - PlainBucket * _auxiliaryTable ; <nl> - uint32_t _auxiliaryLogSize ; <nl> - uint64_t _auxiliaryTableSize ; <nl> - uint32_t _auxiliaryMaskShift ; <nl> - uint32_t _auxiliaryBucketMask ; <nl> - <nl> / / friend class manager and tasks <nl> friend class FreeMemoryTask ; <nl> friend class Manager ; <nl> class PlainCache final : public Cache { <nl> <nl> private : <nl> static uint64_t allocationSize ( bool enableWindowedStats ) ; <nl> - static std : : shared_ptr < Cache > create ( Manager * manager , <nl> - Manager : : MetadataItr metadata , <nl> - bool allowGrowth , <nl> + static std : : shared_ptr < Cache > create ( Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) ; <nl> - / / management <nl> - bool freeMemory ( ) ; <nl> - bool migrate ( ) ; <nl> - void clearTables ( ) ; <nl> + <nl> + virtual uint64_t freeMemoryFrom ( uint32_t hash ) ; <nl> + virtual void migrateBucket ( void * sourcePtr , <nl> + std : : unique_ptr < Table : : Subtable > targets , <nl> + std : : shared_ptr < Table > newTable ) ; <nl> <nl> / / helpers <nl> - std : : pair < bool , PlainBucket * > getBucket ( uint32_t hash , int64_t maxTries , <nl> - bool singleOperation = true ) ; <nl> - void clearTable ( PlainBucket * table , uint64_t tableSize ) ; <nl> + std : : tuple < bool , PlainBucket * , std : : shared_ptr < Table > > getBucket ( <nl> + uint32_t hash , int64_t maxTries , bool singleOperation = true ) ; <nl> uint32_t getIndex ( uint32_t hash , bool useAuxiliary ) const ; <nl> + <nl> + static Table : : BucketClearer bucketClearer ( Metadata * metadata ) ; <nl> } ; <nl> <nl> } ; / / end namespace cache <nl> mmm a / arangod / Cache / State . cpp <nl> ppp b / arangod / Cache / State . cpp <nl> State : : State ( ) : _state ( 0 ) { } <nl> <nl> State : : State ( State const & other ) : _state ( other . _state . load ( ) ) { } <nl> <nl> + State & State : : operator = ( State const & other ) { <nl> + if ( this ! = & other ) { <nl> + _state = other . _state . load ( ) ; <nl> + } <nl> + <nl> + return * this ; <nl> + } <nl> + <nl> bool State : : isLocked ( ) const { <nl> return ( ( _state . load ( ) & static_cast < uint32_t > ( Flag : : locked ) ) > 0 ) ; <nl> } <nl> mmm a / arangod / Cache / State . h <nl> ppp b / arangod / Cache / State . 
h <nl> struct State { <nl> enum class Flag : uint32_t { <nl> locked = 0x00000001 , <nl> blacklisted = 0x00000002 , <nl> - migrated = 0x00000004 , <nl> - migrating = 0x00000008 , <nl> - rebalancing = 0x00000010 , <nl> - resizing = 0x00000020 , <nl> - shutdown = 0x00000040 , <nl> - shuttingDown = 0x00000080 <nl> + disabled = 0x00000004 , <nl> + migrated = 0x00000008 , <nl> + migrating = 0x00000010 , <nl> + rebalancing = 0x00000020 , <nl> + resizing = 0x00000040 , <nl> + shutdown = 0x00000080 , <nl> + shuttingDown = 0x00000100 <nl> } ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> struct State { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> State ( State const & other ) ; <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Initializes state to match another <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + State & operator = ( State const & other ) ; <nl> + <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Checks if state is locked . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> new file mode 100644 <nl> index 00000000000 . . 62e7a43f3e6 <nl> mmm / dev / null <nl> ppp b / arangod / Cache / Table . cpp <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2014 - 2017 ArangoDB GmbH , Cologne , Germany <nl> + / / / Copyright 2004 - 2014 triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Daniel H . Larkin <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # include " Cache / Table . h " <nl> + # include " Basics / Common . h " <nl> + # include " Cache / Common . h " <nl> + # include " Cache / State . h " <nl> + <nl> + # include < stdint . 
h > <nl> + # include < memory > <nl> + # include < stdexcept > <nl> + <nl> + using namespace arangodb : : cache ; <nl> + <nl> + const uint32_t Table : : minLogSize = 8 ; <nl> + const uint32_t Table : : maxLogSize = 32 ; <nl> + <nl> + bool Table : : GenericBucket : : lock ( int64_t maxTries ) { <nl> + return _state . lock ( maxTries ) ; <nl> + } <nl> + <nl> + void Table : : GenericBucket : : unlock ( ) { <nl> + TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + _state . unlock ( ) ; <nl> + } <nl> + <nl> + bool Table : : GenericBucket : : isMigrated ( ) const { <nl> + TRI_ASSERT ( _state . isLocked ( ) ) ; <nl> + return _state . isSet ( State : : Flag : : migrated ) ; <nl> + } <nl> + <nl> + Table : : Subtable : : Subtable ( std : : shared_ptr < Table > source , GenericBucket * buckets , <nl> + uint64_t size , uint32_t mask , uint32_t shift ) <nl> + : _source ( source ) , <nl> + _buckets ( buckets ) , <nl> + _size ( size ) , <nl> + _mask ( mask ) , <nl> + _shift ( shift ) { } <nl> + <nl> + void * Table : : Subtable : : fetchBucket ( uint32_t hash ) { <nl> + return & ( _buckets [ ( hash & _mask ) > > _shift ] ) ; <nl> + } <nl> + <nl> + bool Table : : Subtable : : applyToAllBuckets ( std : : function < bool ( void * ) > cb ) { <nl> + bool ok = true ; <nl> + for ( uint64_t i = 0 ; i < _size ; i + + ) { <nl> + GenericBucket * bucket = & ( _buckets [ i ] ) ; <nl> + ok = cb ( bucket ) ; <nl> + if ( ! ok ) { <nl> + break ; <nl> + } <nl> + } <nl> + return ok ; <nl> + } <nl> + <nl> + Table : : Table ( uint32_t logSize ) <nl> + : _state ( ) , <nl> + _logSize ( std : : min ( logSize , maxLogSize ) ) , <nl> + _size ( static_cast < uint64_t > ( 1 ) < < _logSize ) , <nl> + _shift ( 32 - _logSize ) , <nl> + _mask ( ( _size - 1 ) < < _shift ) , <nl> + _buckets ( new GenericBucket [ _size ] ) , <nl> + _auxiliary ( nullptr ) , <nl> + _bucketClearer ( defaultClearer ) , <nl> + _slotsTotal ( _size ) , <nl> + _slotsUsed ( 0 ) { <nl> + _state . lock ( ) ; <nl> + _state . toggleFlag ( State : : Flag : : disabled ) ; <nl> + memset ( _buckets . get ( ) , 0 , BUCKET_SIZE * _size ) ; <nl> + _state . unlock ( ) ; <nl> + } <nl> + <nl> + uint64_t Table : : allocationSize ( uint32_t logSize ) { <nl> + return sizeof ( Table ) + ( BUCKET_SIZE * ( static_cast < uint64_t > ( 1 ) < < logSize ) ) ; <nl> + } <nl> + <nl> + uint64_t Table : : memoryUsage ( ) const { return Table : : allocationSize ( _logSize ) ; } <nl> + <nl> + uint64_t Table : : size ( ) const { return _size ; } <nl> + <nl> + uint32_t Table : : logSize ( ) const { return _logSize ; } <nl> + <nl> + std : : pair < void * , std : : shared_ptr < Table > > Table : : fetchAndLockBucket ( <nl> + uint32_t hash , int64_t maxTries ) { <nl> + GenericBucket * bucket = nullptr ; <nl> + std : : shared_ptr < Table > source ( nullptr ) ; <nl> + bool ok = _state . lock ( maxTries ) ; <nl> + if ( ok ) { <nl> + ok = ! _state . isSet ( State : : Flag : : disabled ) ; <nl> + if ( ok ) { <nl> + TRI_ASSERT ( _buckets . get ( ) ! = nullptr ) ; <nl> + bucket = & ( _buckets [ ( hash & _mask ) > > _shift ] ) ; <nl> + source = shared_from_this ( ) ; <nl> + ok = bucket - > lock ( maxTries ) ; <nl> + if ( ok ) { <nl> + if ( bucket - > isMigrated ( ) ) { <nl> + bucket - > unlock ( ) ; <nl> + bucket = nullptr ; <nl> + source . reset ( ) ; <nl> + if ( _auxiliary . get ( ) ! = nullptr ) { <nl> + auto pair = _auxiliary - > fetchAndLockBucket ( hash , maxTries ) ; <nl> + bucket = reinterpret_cast < GenericBucket * > ( pair . first ) ; <nl> + source = pair . 
second ; <nl> + } <nl> + } <nl> + } else { <nl> + bucket = nullptr ; <nl> + source . reset ( ) ; <nl> + } <nl> + } <nl> + _state . unlock ( ) ; <nl> + } <nl> + <nl> + return std : : make_pair ( bucket , source ) ; <nl> + } <nl> + <nl> + std : : shared_ptr < Table > Table : : setAuxiliary ( std : : shared_ptr < Table > table ) { <nl> + std : : shared_ptr < Table > result = table ; <nl> + if ( table . get ( ) ! = this ) { <nl> + _state . lock ( ) ; <nl> + if ( table . get ( ) = = nullptr ) { <nl> + result = _auxiliary ; <nl> + _auxiliary = table ; <nl> + } else if ( _auxiliary . get ( ) = = nullptr ) { <nl> + _auxiliary = table ; <nl> + result . reset ( ) ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + } <nl> + return result ; <nl> + } <nl> + <nl> + void * Table : : primaryBucket ( uint32_t index ) { <nl> + if ( ! isEnabled ( ) ) { <nl> + return nullptr ; <nl> + } <nl> + TRI_ASSERT ( _buckets . get ( ) ! = nullptr ) ; <nl> + return & ( _buckets [ index ] ) ; <nl> + } <nl> + <nl> + std : : unique_ptr < Table : : Subtable > Table : : auxiliaryBuckets ( uint32_t index ) { <nl> + if ( ! isEnabled ( ) ) { <nl> + return std : : unique_ptr < Subtable > ( nullptr ) ; <nl> + } <nl> + GenericBucket * base ; <nl> + uint64_t size ; <nl> + uint32_t mask ; <nl> + uint32_t shift ; <nl> + <nl> + _state . lock ( ) ; <nl> + std : : shared_ptr < Table > source = _auxiliary - > shared_from_this ( ) ; <nl> + TRI_ASSERT ( _auxiliary . get ( ) ! = nullptr ) ; <nl> + if ( _logSize > _auxiliary - > _logSize ) { <nl> + uint32_t diff = _logSize - _auxiliary - > _logSize ; <nl> + base = & ( _auxiliary - > _buckets [ index > > diff ] ) ; <nl> + size = 1 ; <nl> + mask = 0 ; <nl> + shift = 0 ; <nl> + } else { <nl> + uint32_t diff = _auxiliary - > _logSize - _logSize ; <nl> + base = & ( _auxiliary - > _buckets [ index < < diff ] ) ; <nl> + size = static_cast < uint64_t > ( 1 ) < < diff ; <nl> + mask = ( ( size - 1 ) < < _auxiliary - > _shift ) ; <nl> + shift = _auxiliary - > _shift ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + <nl> + return std : : make_unique < Subtable > ( source , base , size , mask , shift ) ; <nl> + } <nl> + <nl> + void Table : : setTypeSpecifics ( BucketClearer clearer , size_t slotsPerBucket ) { <nl> + _bucketClearer = clearer ; <nl> + _slotsTotal = _size * static_cast < uint64_t > ( slotsPerBucket ) ; <nl> + } <nl> + <nl> + void Table : : clear ( ) { <nl> + disable ( ) ; <nl> + if ( _auxiliary . get ( ) ! = nullptr ) { <nl> + throw ; <nl> + } <nl> + for ( uint64_t i = 0 ; i < _size ; i + + ) { <nl> + _bucketClearer ( & ( _buckets [ i ] ) ) ; <nl> + } <nl> + _bucketClearer = Table : : defaultClearer ; <nl> + _slotsUsed = 0 ; <nl> + } <nl> + <nl> + void Table : : disable ( ) { <nl> + _state . lock ( ) ; <nl> + if ( ! _state . isSet ( State : : Flag : : disabled ) ) { <nl> + _state . toggleFlag ( State : : Flag : : disabled ) ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + } <nl> + <nl> + void Table : : enable ( ) { <nl> + _state . lock ( ) ; <nl> + if ( _state . isSet ( State : : Flag : : disabled ) ) { <nl> + _state . toggleFlag ( State : : Flag : : disabled ) ; <nl> + } <nl> + _state . unlock ( ) ; <nl> + } <nl> + <nl> + bool Table : : isEnabled ( int64_t maxTries ) { <nl> + bool ok = _state . lock ( maxTries ) ; <nl> + if ( ok ) { <nl> + ok = ! _state . isSet ( State : : Flag : : disabled ) ; <nl> + _state . 
unlock ( ) ; <nl> + } <nl> + return ok ; <nl> + } <nl> + <nl> + bool Table : : slotFilled ( ) { <nl> + return ( ( static_cast < double > ( + + _slotsUsed ) / <nl> + static_cast < double > ( _slotsTotal ) ) > Table : : idealUpperRatio ) ; <nl> + } <nl> + <nl> + bool Table : : slotEmptied ( ) { <nl> + return ( ( ( static_cast < double > ( - - _slotsUsed ) / <nl> + static_cast < double > ( _slotsTotal ) ) < Table : : idealLowerRatio ) & & <nl> + ( _logSize > Table : : minLogSize ) ) ; <nl> + } <nl> + <nl> + uint32_t Table : : idealSize ( ) const { <nl> + return ( ( ( static_cast < double > ( _slotsUsed . load ( ) ) / <nl> + static_cast < double > ( _slotsTotal ) ) > Table : : idealUpperRatio ) <nl> + ? ( logSize ( ) + 1 ) <nl> + : ( ( ( static_cast < double > ( _slotsUsed . load ( ) ) / <nl> + static_cast < double > ( _slotsTotal ) ) < Table : : idealLowerRatio ) <nl> + ? ( logSize ( ) - 1 ) <nl> + : logSize ( ) ) ) ; <nl> + } <nl> + <nl> + void Table : : defaultClearer ( void * ptr ) { <nl> + throw std : : invalid_argument ( " must register a clearer " ) ; <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f02630b27ed <nl> mmm / dev / null <nl> ppp b / arangod / Cache / Table . h <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2014 - 2017 ArangoDB GmbH , Cologne , Germany <nl> + / / / Copyright 2004 - 2014 triAGENS GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License . <nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Daniel H . Larkin <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # ifndef ARANGODB_CACHE_TABLE_H <nl> + # define ARANGODB_CACHE_TABLE_H <nl> + <nl> + # include " Basics / Common . h " <nl> + # include " Cache / Common . h " <nl> + # include " Cache / State . h " <nl> + <nl> + # include < stdint . h > <nl> + # include < memory > <nl> + <nl> + namespace arangodb { <nl> + namespace cache { <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Class to manage operations on a table of buckets . 
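The slot counters above feed the resize heuristic; here is the same decision written as an if/else ladder instead of the nested ternary in idealSize(), with assumed thresholds standing in for idealUpperRatio and idealLowerRatio, whose definitions are not part of this hunk.

#include <cstdint>
#include <iostream>

// Assumed thresholds; the real constants belong to Table.
static constexpr double kUpper = 0.75;
static constexpr double kLower = 0.25;

// Suggested logSize: grow by one when overfull, shrink by one when underfull,
// otherwise keep the current size, mirroring Table::idealSize().
std::uint32_t idealLogSize(std::uint64_t used, std::uint64_t total,
                           std::uint32_t logSize) {
  double ratio = static_cast<double>(used) / static_cast<double>(total);
  if (ratio > kUpper) return logSize + 1;
  if (ratio < kLower) return logSize - 1;
  return logSize;
}

int main() {
  // 2^10 buckets times 5 slots each, as with PlainBucket::slotsData == 5
  const std::uint64_t total = (1ULL << 10) * 5;
  std::cout << idealLogSize(4000, total, 10) << '\n';  // 11: 78% full, grow
  std::cout << idealLogSize(1000, total, 10) << '\n';  // 9: 19.5% full, shrink
  std::cout << idealLogSize(2500, total, 10) << '\n';  // 10: in band, keep
}

Note that the real slotEmptied() additionally refuses to trigger a shrink unless _logSize is above Table::minLogSize, which keeps the decrement from ever underflowing at the floor.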
<nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + class Table : public std : : enable_shared_from_this < Table > { <nl> + public : <nl> + static const uint32_t minLogSize ; <nl> + static const uint32_t maxLogSize ; <nl> + static constexpr uint32_t standardLogSizeAdjustment = 6 ; <nl> + static constexpr int64_t triesGuarantee = - 1 ; <nl> + <nl> + typedef std : : function < void ( void * ) > BucketClearer ; <nl> + <nl> + private : <nl> + struct alignas ( BUCKET_SIZE ) GenericBucket { <nl> + State _state ; <nl> + uint8_t _filler [ BUCKET_SIZE - sizeof ( State ) ] ; <nl> + bool lock ( int64_t maxTries ) ; <nl> + void unlock ( ) ; <nl> + bool isMigrated ( ) const ; <nl> + } ; <nl> + static_assert ( sizeof ( GenericBucket ) = = BUCKET_SIZE , <nl> + " Expected sizeof ( GenericBucket ) = = BUCKET_SIZE . " ) ; <nl> + <nl> + public : <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Helper class for migration . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + struct Subtable { <nl> + Subtable ( std : : shared_ptr < Table > source , GenericBucket * buckets , <nl> + uint64_t size , uint32_t mask , uint32_t shift ) ; <nl> + void * fetchBucket ( uint32_t hash ) ; <nl> + bool applyToAllBuckets ( std : : function < bool ( void * ) > cb ) ; <nl> + <nl> + private : <nl> + std : : shared_ptr < Table > _source ; <nl> + GenericBucket * _buckets ; <nl> + uint64_t _size ; <nl> + uint32_t _mask ; <nl> + uint32_t _shift ; <nl> + } ; <nl> + <nl> + public : <nl> + Table ( ) = delete ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Construct a new table of size 2 ^ ( logSize ) in disabled state . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + Table ( uint32_t logSize ) ; <nl> + <nl> + public : <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the memory usage for a table with specified logSize <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + static uint64_t allocationSize ( uint32_t logSize ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the memory usage of the table . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint64_t memoryUsage ( ) const ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the number of buckets in the table . 
<nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint64_t size ( ) const ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the logSize of the table . ( 2 ^ ( logSize ( ) ) = = size ( ) ) <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint32_t logSize ( ) const ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Fetches a pointer to the bucket mapped by the given hash , and locks <nl> + / / / it . <nl> + / / / <nl> + / / / Returns a pair of nullptrs if it could not lock the bucket within <nl> + / / / maxTries attempts . If maxTries is negative , it will not limit the number <nl> + / / / of attempts . If the primary bucket is migrated , it will attempt a lookup <nl> + / / / in the auxiliary table . The second member of the returned pair is the <nl> + / / / source table for the bucket returned as the first member . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + std : : pair < void * , std : : shared_ptr < Table > > fetchAndLockBucket ( <nl> + uint32_t hash , int64_t maxTries = - 1 ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Sets the auxiliary table . <nl> + / / / <nl> + / / / If the parameter is non - null , then the return value will be null if <nl> + / / / successful and equal to the parameter otherwise . If the parameter is null , <nl> + / / / then the return value will be the existing auxiliary table ( possibly <nl> + / / / null ) . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + std : : shared_ptr < Table > setAuxiliary ( std : : shared_ptr < Table > table ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns a pointer to the specified bucket in the primary table , <nl> + / / / regardless of migration status . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + void * primaryBucket ( uint32_t index ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns a subtable of the auxiliary table which corresponds to <nl> + / / / the specified bucket in the primary table . 
<nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + std : : unique_ptr < Table : : Subtable > auxiliaryBuckets ( uint32_t index ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Set cache - type - specific members . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + void setTypeSpecifics ( BucketClearer clearer , size_t slotsPerBucket ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Reset table to fully empty state . Disables table . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + void clear ( ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Enables table . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + void enable ( ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Report that a slot was filled . <nl> + / / / <nl> + / / / If this causes the fill ratio to exceed the ideal upper limit , the return <nl> + / / / value will be true , and the cache should request migration to a larger <nl> + / / / table . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + bool slotFilled ( ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Report that a slot was emptied . <nl> + / / / <nl> + / / / If this causes the fill ratio to fall below the ideal lower limit , the <nl> + / / / return value will be true , and the cache should request migration to a <nl> + / / / smaller table . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + bool slotEmptied ( ) ; <nl> + <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief Returns the ideal size of the table based on fill ratio . <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint32_t idealSize ( ) const ; <nl> + <nl> + private : <nl> + static constexpr double idealLowerRatio = 0 . 125 ; <nl> + static constexpr double idealUpperRatio = 0 . 
75 ; <nl> + <nl> + State _state ; <nl> + <nl> + uint32_t _logSize ; <nl> + uint64_t _size ; <nl> + uint32_t _shift ; <nl> + uint32_t _mask ; <nl> + std : : unique_ptr < GenericBucket [ ] > _buckets ; <nl> + <nl> + std : : shared_ptr < Table > _auxiliary ; <nl> + <nl> + BucketClearer _bucketClearer ; <nl> + <nl> + uint64_t _slotsTotal ; <nl> + std : : atomic < uint64_t > _slotsUsed ; <nl> + <nl> + private : <nl> + void disable ( ) ; <nl> + bool isEnabled ( int64_t maxTries = triesGuarantee ) ; <nl> + static void defaultClearer ( void * ptr ) ; <nl> + } ; <nl> + <nl> + } ; / / end namespace cache <nl> + } ; / / end namespace arangodb <nl> + <nl> + # endif <nl> mmm a / arangod / Cache / TransactionalBucket . cpp <nl> ppp b / arangod / Cache / TransactionalBucket . cpp <nl> <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - size_t TransactionalBucket : : SLOTS_DATA = 3 ; <nl> - size_t TransactionalBucket : : SLOTS_BLACKLIST = 4 ; <nl> - <nl> TransactionalBucket : : TransactionalBucket ( ) { <nl> _state . lock ( ) ; <nl> clear ( ) ; <nl> } <nl> <nl> - bool TransactionalBucket : : lock ( uint64_t transactionTerm , int64_t maxTries ) { <nl> - return _state . lock ( maxTries , [ this , transactionTerm ] ( ) - > void { <nl> - updateBlacklistTerm ( transactionTerm ) ; <nl> - } ) ; <nl> + bool TransactionalBucket : : lock ( int64_t maxTries ) { <nl> + return _state . lock ( maxTries ) ; <nl> } <nl> <nl> void TransactionalBucket : : unlock ( ) { <nl> bool TransactionalBucket : : isFullyBlacklisted ( ) const { <nl> bool TransactionalBucket : : isFull ( ) const { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> bool hasEmptySlot = false ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t slot = SLOTS_DATA - ( i + 1 ) ; <nl> - if ( _cachedHashes [ slot ] = = 0 ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> + if ( _cachedData [ slot ] = = nullptr ) { <nl> hasEmptySlot = true ; <nl> break ; <nl> } <nl> CachedValue * TransactionalBucket : : find ( uint32_t hash , void const * key , <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> CachedValue * result = nullptr ; <nl> <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - if ( _cachedHashes [ i ] = = 0 ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + if ( _cachedData [ i ] = = nullptr ) { <nl> break ; <nl> } <nl> if ( _cachedHashes [ i ] = = hash & & _cachedData [ i ] - > sameKey ( key , keySize ) ) { <nl> void TransactionalBucket : : insert ( uint32_t hash , CachedValue * value ) { <nl> return ; <nl> } <nl> <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - if ( _cachedHashes [ i ] = = 0 ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + if ( _cachedData [ i ] = = nullptr ) { <nl> / / found an empty slot <nl> _cachedHashes [ i ] = hash ; <nl> _cachedData [ i ] = value ; <nl> CachedValue * TransactionalBucket : : blacklist ( uint32_t hash , void const * key , <nl> return value ; <nl> } <nl> <nl> - for ( size_t i = 0 ; i < SLOTS_BLACKLIST ; i + + ) { <nl> + for ( size_t i = 0 ; i < slotsBlacklist ; i + + ) { <nl> if ( _blacklistHashes [ i ] = = 0 ) { <nl> / / found an empty slot <nl> _blacklistHashes [ i ] = hash ; <nl> bool TransactionalBucket : : isBlacklisted ( uint32_t hash ) const { <nl> } <nl> <nl> bool blacklisted = false ; <nl> - for ( size_t i = 0 ; i < SLOTS_BLACKLIST ; i + + ) { <nl> + for ( size_t i = 0 ; i < slotsBlacklist ; i + + ) { <nl> if ( _blacklistHashes [ i ] = = hash ) { <nl> blacklisted = true ; <nl> break ; <nl> bool 
TransactionalBucket : : isBlacklisted ( uint32_t hash ) const { <nl> <nl> CachedValue * TransactionalBucket : : evictionCandidate ( bool ignoreRefCount ) const { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t slot = SLOTS_DATA - ( i + 1 ) ; <nl> - if ( _cachedHashes [ slot ] = = 0 ) { <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> + if ( _cachedData [ slot ] = = nullptr ) { <nl> continue ; <nl> } <nl> if ( ignoreRefCount | | _cachedData [ slot ] - > isFreeable ( ) ) { <nl> CachedValue * TransactionalBucket : : evictionCandidate ( bool ignoreRefCount ) const { <nl> <nl> void TransactionalBucket : : evict ( CachedValue * value , bool optimizeForInsertion ) { <nl> TRI_ASSERT ( isLocked ( ) ) ; <nl> - for ( size_t i = 0 ; i < SLOTS_DATA ; i + + ) { <nl> - size_t slot = SLOTS_DATA - ( i + 1 ) ; <nl> + for ( size_t i = 0 ; i < slotsData ; i + + ) { <nl> + size_t slot = slotsData - ( i + 1 ) ; <nl> if ( _cachedData [ slot ] = = value ) { <nl> / / found a match <nl> _cachedHashes [ slot ] = 0 ; <nl> void TransactionalBucket : : updateBlacklistTerm ( uint64_t term ) { <nl> _state . toggleFlag ( State : : Flag : : blacklisted ) ; <nl> } <nl> <nl> - memset ( _blacklistHashes , 0 , ( SLOTS_BLACKLIST * sizeof ( uint32_t ) ) ) ; <nl> + memset ( _blacklistHashes , 0 , ( slotsBlacklist * sizeof ( uint32_t ) ) ) ; <nl> } <nl> } <nl> <nl> void TransactionalBucket : : moveSlot ( size_t slot , bool moveToFront ) { <nl> } <nl> } else { <nl> / / move slot to back <nl> - for ( ; ( i < SLOTS_DATA - 1 ) & & ( _cachedHashes [ i + 1 ] ! = 0 ) ; i + + ) { <nl> + for ( ; ( i < slotsData - 1 ) & & ( _cachedHashes [ i + 1 ] ! = 0 ) ; i + + ) { <nl> _cachedHashes [ i ] = _cachedHashes [ i + 1 ] ; <nl> _cachedData [ i ] = _cachedData [ i + 1 ] ; <nl> } <nl> mmm a / arangod / Cache / TransactionalBucket . h <nl> ppp b / arangod / Cache / TransactionalBucket . h <nl> <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / State . h " <nl> <nl> # include < stdint . h > <nl> namespace cache { <nl> / / / Data entries are carefully laid out to ensure the structure fits in a single <nl> / / / cacheline . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - struct alignas ( 64 ) TransactionalBucket { <nl> + struct alignas ( BUCKET_SIZE ) TransactionalBucket { <nl> State _state ; <nl> <nl> / / actual cached entries <nl> - uint32_t _cachedHashes [ 3 ] ; <nl> - CachedValue * _cachedData [ 3 ] ; <nl> - static size_t SLOTS_DATA ; <nl> + static constexpr size_t slotsData = 3 ; <nl> + uint32_t _cachedHashes [ slotsData ] ; <nl> + CachedValue * _cachedData [ slotsData ] ; <nl> <nl> / / blacklist entries for transactional semantics <nl> - uint32_t _blacklistHashes [ 4 ] ; <nl> + static constexpr size_t slotsBlacklist = 4 ; <nl> + uint32_t _blacklistHashes [ slotsBlacklist ] ; <nl> uint64_t _blacklistTerm ; <nl> - static size_t SLOTS_BLACKLIST ; <nl> <nl> / / padding , if necessary ? <nl> # ifdef TRI_PADDING_32 <nl> struct alignas ( 64 ) TransactionalBucket { <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Attempt to lock bucket ( failing after maxTries attempts ) . 
<nl> - / / / <nl> - / / / If the bucket is successfully locked , the transaction term is updated . <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - bool lock ( uint64_t transactionTerm , int64_t maxTries ) ; <nl> + bool lock ( int64_t maxTries ) ; <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / / @ brief Unlock the bucket . Requires bucket to be locked . <nl> struct alignas ( 64 ) TransactionalBucket { <nl> bool haveOpenTransaction ( ) const ; <nl> } ; <nl> <nl> + / / ensure that TransactionalBucket is exactly BUCKET_SIZE <nl> + static_assert ( sizeof ( TransactionalBucket ) = = BUCKET_SIZE , <nl> + " Expected sizeof ( TransactionalBucket ) = = BUCKET_SIZE . " ) ; <nl> + <nl> } ; / / end namespace cache <nl> } ; / / end namespace arangodb <nl> <nl> mmm a / arangod / Cache / TransactionalCache . cpp <nl> ppp b / arangod / Cache / TransactionalCache . cpp <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / Cache . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> # include " Cache / TransactionalBucket . h " <nl> - # include " Random / RandomGenerator . h " <nl> <nl> # include < stdint . h > <nl> # include < atomic > <nl> # include < chrono > <nl> # include < list > <nl> <nl> - using namespace arangodb : : cache ; <nl> + # include < iostream > <nl> <nl> - static constexpr int64_t TRIES_FAST = 50LL ; <nl> - static constexpr int64_t TRIES_SLOW = 10000LL ; <nl> - static constexpr int64_t TRIES_GUARANTEE = - 1LL ; <nl> + using namespace arangodb : : cache ; <nl> <nl> Cache : : Finding TransactionalCache : : find ( void const * key , uint32_t keySize ) { <nl> TRI_ASSERT ( key ! = nullptr ) ; <nl> Cache : : Finding TransactionalCache : : find ( void const * key , uint32_t keySize ) { <nl> <nl> bool ok ; <nl> TransactionalBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_FAST ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast ) ; <nl> <nl> if ( ok ) { <nl> result . reset ( bucket - > find ( hash , key , keySize ) ) ; <nl> bool TransactionalCache : : insert ( CachedValue * value ) { <nl> <nl> bool ok ; <nl> TransactionalBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_FAST ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast ) ; <nl> <nl> if ( ok ) { <nl> + bool maybeMigrate = false ; <nl> bool allowed = ! bucket - > isBlacklisted ( hash ) ; <nl> if ( allowed ) { <nl> bool eviction = false ; <nl> bool TransactionalCache : : insert ( CachedValue * value ) { <nl> candidate = bucket - > evictionCandidate ( ) ; <nl> if ( candidate = = nullptr ) { <nl> allowed = false ; <nl> - } else { <nl> - eviction = true ; <nl> } <nl> } <nl> <nl> bool TransactionalCache : : insert ( CachedValue * value ) { <nl> change - = candidate - > size ( ) ; <nl> } <nl> <nl> - _metadata - > lock ( ) ; <nl> - allowed = _metadata - > adjustUsageIfAllowed ( change ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . 
lock ( ) ; <nl> + allowed = _metadata . adjustUsageIfAllowed ( change ) ; <nl> + _metadata . unlock ( ) ; <nl> <nl> if ( allowed ) { <nl> if ( candidate ! = nullptr ) { <nl> bucket - > evict ( candidate , true ) ; <nl> freeValue ( candidate ) ; <nl> + eviction = true ; <nl> } <nl> - recordStat ( eviction ? Stat : : insertEviction : Stat : : insertNoEviction ) ; <nl> bucket - > insert ( hash , value ) ; <nl> inserted = true ; <nl> + if ( ! eviction ) { <nl> + maybeMigrate = source - > slotFilled ( ) ; <nl> + } <nl> } else { <nl> - requestResize ( ) ; / / let function do the hard work <nl> + requestGrow ( ) ; / / let function do the hard work <nl> } <nl> } <nl> } <nl> <nl> bucket - > unlock ( ) ; <nl> - if ( inserted ) { <nl> - requestMigrate ( ) ; / / let function do the hard work <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; / / let function do the hard work <nl> } <nl> endOperation ( ) ; <nl> } <nl> bool TransactionalCache : : remove ( void const * key , uint32_t keySize ) { <nl> <nl> bool ok ; <nl> TransactionalBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_SLOW ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesSlow ) ; <nl> <nl> if ( ok ) { <nl> + bool maybeMigrate = false ; <nl> CachedValue * candidate = bucket - > remove ( hash , key , keySize ) ; <nl> <nl> if ( candidate ! = nullptr ) { <nl> int64_t change = - static_cast < int64_t > ( candidate - > size ( ) ) ; <nl> <nl> - _metadata - > lock ( ) ; <nl> - bool allowed = _metadata - > adjustUsageIfAllowed ( change ) ; <nl> + _metadata . lock ( ) ; <nl> + bool allowed = _metadata . adjustUsageIfAllowed ( change ) ; <nl> TRI_ASSERT ( allowed ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . unlock ( ) ; <nl> <nl> freeValue ( candidate ) ; <nl> + maybeMigrate = source - > slotEmptied ( ) ; <nl> } <nl> <nl> removed = true ; <nl> bucket - > unlock ( ) ; <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; <nl> + } <nl> endOperation ( ) ; <nl> } <nl> <nl> bool TransactionalCache : : blacklist ( void const * key , uint32_t keySize ) { <nl> <nl> bool ok ; <nl> TransactionalBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( hash , TRIES_SLOW ) ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesSlow ) ; <nl> <nl> if ( ok ) { <nl> + bool maybeMigrate = false ; <nl> CachedValue * candidate = bucket - > blacklist ( hash , key , keySize ) ; <nl> blacklisted = true ; <nl> <nl> if ( candidate ! = nullptr ) { <nl> int64_t change = - static_cast < int64_t > ( candidate - > size ( ) ) ; <nl> <nl> - _metadata - > lock ( ) ; <nl> - bool allowed = _metadata - > adjustUsageIfAllowed ( change ) ; <nl> + _metadata . lock ( ) ; <nl> + bool allowed = _metadata . adjustUsageIfAllowed ( change ) ; <nl> TRI_ASSERT ( allowed ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _metadata . 
unlock ( ) ; <nl> <nl> freeValue ( candidate ) ; <nl> + maybeMigrate = source - > slotEmptied ( ) ; <nl> } <nl> <nl> bucket - > unlock ( ) ; <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; <nl> + } <nl> endOperation ( ) ; <nl> } <nl> <nl> bool TransactionalCache : : blacklist ( void const * key , uint32_t keySize ) { <nl> <nl> uint64_t TransactionalCache : : allocationSize ( bool enableWindowedStats ) { <nl> return sizeof ( TransactionalCache ) + <nl> - StatBuffer : : allocationSize ( _evictionStatsCapacity ) + <nl> ( enableWindowedStats ? ( sizeof ( StatBuffer ) + <nl> StatBuffer : : allocationSize ( _findStatsCapacity ) ) <nl> : 0 ) ; <nl> } <nl> <nl> std : : shared_ptr < Cache > TransactionalCache : : create ( Manager * manager , <nl> - Manager : : MetadataItr metadata , <nl> - bool allowGrowth , <nl> + Metadata metadata , <nl> + std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) { <nl> return std : : make_shared < TransactionalCache > ( Cache : : ConstructionGuard ( ) , <nl> - manager , metadata , allowGrowth , <nl> + manager , metadata , table , <nl> enableWindowedStats ) ; <nl> } <nl> <nl> TransactionalCache : : TransactionalCache ( Cache : : ConstructionGuard guard , <nl> - Manager * manager , <nl> - Manager : : MetadataItr metadata , <nl> - bool allowGrowth , <nl> + Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) <nl> - : Cache ( guard , manager , metadata , allowGrowth , enableWindowedStats ) , <nl> - _table ( nullptr ) , <nl> - _logSize ( 0 ) , <nl> - _tableSize ( 1 ) , <nl> - _maskShift ( 32 ) , <nl> - _bucketMask ( 0 ) , <nl> - _auxiliaryTable ( nullptr ) , <nl> - _auxiliaryLogSize ( 0 ) , <nl> - _auxiliaryTableSize ( 1 ) , <nl> - _auxiliaryMaskShift ( 32 ) , <nl> - _auxiliaryBucketMask ( 0 ) { <nl> - _state . lock ( ) ; <nl> - if ( isOperational ( ) ) { <nl> - _metadata - > lock ( ) ; <nl> - _table = reinterpret_cast < TransactionalBucket * > ( _metadata - > table ( ) ) ; <nl> - _logSize = _metadata - > logSize ( ) ; <nl> - _tableSize = ( 1ULL < < _logSize ) ; <nl> - _maskShift = 32 - _logSize ; <nl> - _bucketMask = ( _tableSize - 1 ) < < _maskShift ; <nl> - _metadata - > unlock ( ) ; <nl> - } <nl> - _state . unlock ( ) ; <nl> + : Cache ( guard , manager , metadata , table , enableWindowedStats , <nl> + TransactionalCache : : bucketClearer , TransactionalBucket : : slotsData ) { <nl> } <nl> <nl> TransactionalCache : : ~ TransactionalCache ( ) { <nl> TransactionalCache : : ~ TransactionalCache ( ) { <nl> } <nl> } <nl> <nl> - bool TransactionalCache : : freeMemory ( ) { <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return false ; <nl> - } <nl> - startOperation ( ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - bool underLimit = reclaimMemory ( 0ULL ) ; <nl> - uint64_t failures = 0 ; <nl> - while ( ! underLimit ) { <nl> - / / pick a random bucket <nl> - uint32_t randomHash = RandomGenerator : : interval ( UINT32_MAX ) ; <nl> - bool ok ; <nl> - TransactionalBucket * bucket ; <nl> - std : : tie ( ok , bucket ) = getBucket ( randomHash , TRIES_FAST , false ) ; <nl> - <nl> - if ( ok ) { <nl> - failures = 0 ; <nl> - / / evict LRU freeable value if exists <nl> - CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> - <nl> - if ( candidate ! 
= nullptr ) { <nl> - uint64_t size = candidate - > size ( ) ; <nl> - bucket - > evict ( candidate ) ; <nl> - freeValue ( candidate ) ; <nl> + uint64_t TransactionalCache : : freeMemoryFrom ( uint32_t hash ) { <nl> + uint64_t reclaimed = 0 ; <nl> + bool ok ; <nl> + TransactionalBucket * bucket ; <nl> + std : : shared_ptr < Table > source ; <nl> + std : : tie ( ok , bucket , source ) = getBucket ( hash , Cache : : triesFast , false ) ; <nl> <nl> - underLimit = reclaimMemory ( size ) ; <nl> - } <nl> + if ( ok ) { <nl> + bool maybeMigrate = false ; <nl> + / / evict LRU freeable value if exists <nl> + CachedValue * candidate = bucket - > evictionCandidate ( ) ; <nl> <nl> - bucket - > unlock ( ) ; <nl> - } else { <nl> - failures + + ; <nl> - if ( failures > 100 ) { <nl> - _state . lock ( ) ; <nl> - bool shouldQuit = ! isOperational ( ) ; <nl> - _state . unlock ( ) ; <nl> + if ( candidate ! = nullptr ) { <nl> + reclaimed = candidate - > size ( ) ; <nl> + bucket - > evict ( candidate ) ; <nl> + freeValue ( candidate ) ; <nl> + maybeMigrate = source - > slotEmptied ( ) ; <nl> + } <nl> <nl> - if ( shouldQuit ) { <nl> - break ; <nl> - } else { <nl> - failures = 0 ; <nl> - } <nl> - } <nl> + bucket - > unlock ( ) ; <nl> + if ( maybeMigrate ) { <nl> + requestMigrate ( _table - > idealSize ( ) ) ; <nl> } <nl> } <nl> <nl> - endOperation ( ) ; <nl> - return true ; <nl> + return reclaimed ; <nl> } <nl> <nl> - bool TransactionalCache : : migrate ( ) { <nl> - _state . lock ( ) ; <nl> - if ( ! isOperational ( ) ) { <nl> - _state . unlock ( ) ; <nl> - return false ; <nl> - } <nl> - startOperation ( ) ; <nl> - _metadata - > lock ( ) ; <nl> - if ( _metadata - > table ( ) = = nullptr | | _metadata - > auxiliaryTable ( ) = = nullptr ) { <nl> - _metadata - > unlock ( ) ; <nl> - _state . unlock ( ) ; <nl> - endOperation ( ) ; <nl> - return false ; <nl> - } <nl> - _auxiliaryTable = <nl> - reinterpret_cast < TransactionalBucket * > ( _metadata - > auxiliaryTable ( ) ) ; <nl> - _auxiliaryLogSize = _metadata - > auxiliaryLogSize ( ) ; <nl> - _auxiliaryTableSize = ( 1ULL < < _auxiliaryLogSize ) ; <nl> - _auxiliaryMaskShift = ( 32 - _auxiliaryLogSize ) ; <nl> - _auxiliaryBucketMask = ( _auxiliaryTableSize - 1 ) < < _auxiliaryMaskShift ; <nl> - _metadata - > unlock ( ) ; <nl> - _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> + void TransactionalCache : : migrateBucket ( void * sourcePtr , <nl> + std : : unique_ptr < Table : : Subtable > targets , <nl> + std : : shared_ptr < Table > newTable ) { <nl> uint64_t term = _manager - > _transactions . term ( ) ; <nl> <nl> - for ( uint32_t i = 0 ; i < _tableSize ; i + + ) { <nl> - / / lock current bucket <nl> - TransactionalBucket * bucket = & ( _table [ i ] ) ; <nl> - bucket - > lock ( term , - 1LL ) ; <nl> - term = std : : max ( term , bucket - > _blacklistTerm ) ; <nl> - <nl> - / / collect target bucket ( s ) <nl> - std : : vector < TransactionalBucket * > targets ; <nl> - if ( _logSize > _auxiliaryLogSize ) { <nl> - uint32_t targetIndex = ( i < < _maskShift ) > > _auxiliaryMaskShift ; <nl> - targets . emplace_back ( & ( _auxiliaryTable [ targetIndex ] ) ) ; <nl> - } else { <nl> - uint32_t baseIndex = ( i < < _maskShift ) > > _auxiliaryMaskShift ; <nl> - for ( size_t j = 0 ; j < ( 1U < < ( _auxiliaryLogSize - _logSize ) ) ; j + + ) { <nl> - uint32_t targetIndex = baseIndex + j ; <nl> - targets . 
emplace_back ( & ( _auxiliaryTable [ targetIndex ] ) ) ; <nl> - } <nl> - } <nl> - / / lock target bucket ( s ) <nl> - for ( TransactionalBucket * targetBucket : targets ) { <nl> - targetBucket - > lock ( term , TRIES_GUARANTEE ) ; <nl> - term = std : : max ( term , targetBucket - > _blacklistTerm ) ; <nl> - } <nl> - <nl> - / / update all buckets to maximum term found ( guaranteed at most the current ) <nl> - bucket - > updateBlacklistTerm ( term ) ; <nl> - for ( TransactionalBucket * targetBucket : targets ) { <nl> - targetBucket - > updateBlacklistTerm ( term ) ; <nl> - } <nl> - / / now actually migrate any relevant blacklist terms <nl> - if ( bucket - > isFullyBlacklisted ( ) ) { <nl> - for ( TransactionalBucket * targetBucket : targets ) { <nl> - if ( ! targetBucket - > isFullyBlacklisted ( ) ) { <nl> - ( * targetBucket ) . _state . toggleFlag ( State : : Flag : : blacklisted ) ; <nl> - } <nl> + / / lock current bucket <nl> + auto source = reinterpret_cast < TransactionalBucket * > ( sourcePtr ) ; <nl> + source - > lock ( Cache : : triesGuarantee ) ; <nl> + term = std : : max ( term , source - > _blacklistTerm ) ; <nl> + <nl> + / / lock target bucket ( s ) <nl> + targets - > applyToAllBuckets ( [ & term ] ( void * ptr ) - > bool { <nl> + auto targetBucket = reinterpret_cast < TransactionalBucket * > ( ptr ) ; <nl> + bool locked = targetBucket - > lock ( Cache : : triesGuarantee ) ; <nl> + term = std : : max ( term , targetBucket - > _blacklistTerm ) ; <nl> + return locked ; <nl> + } ) ; <nl> + <nl> + / / update all buckets to maximum term found ( guaranteed at most the current ) <nl> + source - > updateBlacklistTerm ( term ) ; <nl> + targets - > applyToAllBuckets ( [ & term ] ( void * ptr ) - > bool { <nl> + auto targetBucket = reinterpret_cast < TransactionalBucket * > ( ptr ) ; <nl> + targetBucket - > updateBlacklistTerm ( term ) ; <nl> + return true ; <nl> + } ) ; <nl> + / / now actually migrate any relevant blacklist terms <nl> + if ( source - > isFullyBlacklisted ( ) ) { <nl> + targets - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + auto targetBucket = reinterpret_cast < TransactionalBucket * > ( ptr ) ; <nl> + if ( ! targetBucket - > isFullyBlacklisted ( ) ) { <nl> + targetBucket - > _state . toggleFlag ( State : : Flag : : blacklisted ) ; <nl> } <nl> - } else { <nl> - for ( size_t j = 0 ; j < TransactionalBucket : : SLOTS_BLACKLIST ; j + + ) { <nl> - uint32_t hash = bucket - > _blacklistHashes [ j ] ; <nl> - if ( hash = = 0 ) { <nl> - break ; <nl> - } <nl> - uint32_t targetIndex = getIndex ( hash , true ) ; <nl> - TransactionalBucket * targetBucket = & ( _auxiliaryTable [ targetIndex ] ) ; <nl> + return true ; <nl> + } ) ; <nl> + } else { <nl> + for ( size_t j = 0 ; j < TransactionalBucket : : slotsBlacklist ; j + + ) { <nl> + uint32_t hash = source - > _blacklistHashes [ j ] ; <nl> + if ( hash ! = 0 ) { <nl> + auto targetBucket = <nl> + reinterpret_cast < TransactionalBucket * > ( targets - > fetchBucket ( hash ) ) ; <nl> CachedValue * candidate = targetBucket - > blacklist ( hash , nullptr , 0 ) ; <nl> - TRI_ASSERT ( candidate = = nullptr ) ; <nl> - bucket - > _blacklistHashes [ j ] = 0 ; <nl> + if ( candidate ! 
= nullptr ) { <nl> + uint64_t size = candidate - > size ( ) ; <nl> + freeValue ( candidate ) ; <nl> + reclaimMemory ( size ) ; <nl> + newTable - > slotEmptied ( ) ; <nl> + } <nl> + source - > _blacklistHashes [ j ] = 0 ; <nl> } <nl> } <nl> + } <nl> <nl> - / / migrate actual values <nl> - for ( size_t j = 0 ; j < TransactionalBucket : : SLOTS_DATA ; j + + ) { <nl> - size_t k = TransactionalBucket : : SLOTS_DATA - ( j + 1 ) ; <nl> - if ( bucket - > _cachedHashes [ k ] ! = 0 ) { <nl> - uint32_t hash = bucket - > _cachedHashes [ k ] ; <nl> - CachedValue * value = bucket - > _cachedData [ k ] ; <nl> - <nl> - uint32_t targetIndex = getIndex ( hash , true ) ; <nl> - TransactionalBucket * targetBucket = & ( _auxiliaryTable [ targetIndex ] ) ; <nl> - if ( targetBucket - > isBlacklisted ( hash ) ) { <nl> + / / migrate actual values <nl> + for ( size_t j = 0 ; j < TransactionalBucket : : slotsData ; j + + ) { <nl> + size_t k = TransactionalBucket : : slotsData - ( j + 1 ) ; <nl> + if ( source - > _cachedData [ k ] ! = nullptr ) { <nl> + uint32_t hash = source - > _cachedHashes [ k ] ; <nl> + CachedValue * value = source - > _cachedData [ k ] ; <nl> + <nl> + auto targetBucket = <nl> + reinterpret_cast < TransactionalBucket * > ( targets - > fetchBucket ( hash ) ) ; <nl> + if ( targetBucket - > isBlacklisted ( hash ) ) { <nl> + uint64_t size = value - > size ( ) ; <nl> + freeValue ( value ) ; <nl> + reclaimMemory ( size ) ; <nl> + } else { <nl> + bool haveSpace = true ; <nl> + if ( targetBucket - > isFull ( ) ) { <nl> + CachedValue * candidate = targetBucket - > evictionCandidate ( ) ; <nl> + if ( candidate ! = nullptr ) { <nl> + targetBucket - > evict ( candidate , true ) ; <nl> + uint64_t size = candidate - > size ( ) ; <nl> + freeValue ( candidate ) ; <nl> + reclaimMemory ( size ) ; <nl> + newTable - > slotEmptied ( ) ; <nl> + } else { <nl> + haveSpace = false ; <nl> + } <nl> + } <nl> + if ( haveSpace ) { <nl> + targetBucket - > insert ( hash , value ) ; <nl> + newTable - > slotFilled ( ) ; <nl> + } else { <nl> uint64_t size = value - > size ( ) ; <nl> freeValue ( value ) ; <nl> reclaimMemory ( size ) ; <nl> - } else { <nl> - bool haveSpace = true ; <nl> - if ( targetBucket - > isFull ( ) ) { <nl> - CachedValue * candidate = targetBucket - > evictionCandidate ( ) ; <nl> - if ( candidate ! = nullptr ) { <nl> - targetBucket - > evict ( candidate , true ) ; <nl> - uint64_t size = candidate - > size ( ) ; <nl> - freeValue ( candidate ) ; <nl> - reclaimMemory ( size ) ; <nl> - } else { <nl> - haveSpace = false ; <nl> - } <nl> - } <nl> - if ( haveSpace ) { <nl> - targetBucket - > insert ( hash , value ) ; <nl> - } else { <nl> - uint64_t size = value - > size ( ) ; <nl> - freeValue ( value ) ; <nl> - reclaimMemory ( size ) ; <nl> - } <nl> } <nl> - <nl> - bucket - > _cachedHashes [ k ] = 0 ; <nl> - bucket - > _cachedData [ k ] = nullptr ; <nl> } <nl> - } <nl> <nl> - / / unlock targets <nl> - for ( TransactionalBucket * targetBucket : targets ) { <nl> - targetBucket - > unlock ( ) ; <nl> + source - > _cachedHashes [ k ] = 0 ; <nl> + source - > _cachedData [ k ] = nullptr ; <nl> } <nl> - <nl> - / / finish up this bucket ' s migration <nl> - bucket - > _state . toggleFlag ( State : : Flag : : migrated ) ; <nl> - bucket - > unlock ( ) ; <nl> } <nl> <nl> - / / swap tables and unmark local migrating flag <nl> - _state . 
lock ( ) ; <nl> - std : : swap ( _table , _auxiliaryTable ) ; <nl> - std : : swap ( _logSize , _auxiliaryLogSize ) ; <nl> - std : : swap ( _tableSize , _auxiliaryTableSize ) ; <nl> - std : : swap ( _maskShift , _auxiliaryMaskShift ) ; <nl> - std : : swap ( _bucketMask , _auxiliaryBucketMask ) ; <nl> - _state . toggleFlag ( State : : Flag : : migrating ) ; <nl> - _state . unlock ( ) ; <nl> - <nl> - / / clear out old table <nl> - clearTable ( _auxiliaryTable , _auxiliaryTableSize ) ; <nl> - <nl> - / / release references to old table <nl> - _state . lock ( ) ; <nl> - _auxiliaryTable = nullptr ; <nl> - _auxiliaryLogSize = 0 ; <nl> - _auxiliaryTableSize = 1 ; <nl> - _auxiliaryMaskShift = 32 ; <nl> - _auxiliaryBucketMask = 0 ; <nl> - _state . unlock ( ) ; <nl> - <nl> - / / swap table in metadata <nl> - _metadata - > lock ( ) ; <nl> - _metadata - > swapTables ( ) ; <nl> - _metadata - > unlock ( ) ; <nl> - <nl> - endOperation ( ) ; <nl> - return true ; <nl> - } <nl> + / / unlock targets <nl> + targets - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + auto bucket = reinterpret_cast < TransactionalBucket * > ( ptr ) ; <nl> + bucket - > unlock ( ) ; <nl> + return true ; <nl> + } ) ; <nl> <nl> - void TransactionalCache : : clearTables ( ) { <nl> - if ( _table ! = nullptr ) { <nl> - clearTable ( _table , _tableSize ) ; <nl> - } <nl> - if ( _auxiliaryTable ! = nullptr ) { <nl> - clearTable ( _auxiliaryTable , _auxiliaryTableSize ) ; <nl> - } <nl> + / / finish up this bucket ' s migration <nl> + source - > _state . toggleFlag ( State : : Flag : : migrated ) ; <nl> + source - > unlock ( ) ; <nl> } <nl> <nl> - std : : pair < bool , TransactionalBucket * > TransactionalCache : : getBucket ( <nl> - uint32_t hash , int64_t maxTries , bool singleOperation ) { <nl> + std : : tuple < bool , TransactionalBucket * , std : : shared_ptr < Table > > <nl> + TransactionalCache : : getBucket ( uint32_t hash , int64_t maxTries , <nl> + bool singleOperation ) { <nl> TransactionalBucket * bucket = nullptr ; <nl> + std : : shared_ptr < Table > source ( nullptr ) ; <nl> <nl> bool ok = _state . lock ( maxTries ) ; <nl> if ( ok ) { <nl> std : : pair < bool , TransactionalBucket * > TransactionalCache : : getBucket ( <nl> if ( singleOperation ) { <nl> startOperation ( ) ; <nl> started = true ; <nl> - _metadata - > lock ( ) ; <nl> - _manager - > reportAccess ( _metadata - > cache ( ) ) ; <nl> - _metadata - > unlock ( ) ; <nl> + _manager - > reportAccess ( shared_from_this ( ) ) ; <nl> } <nl> <nl> uint64_t term = _manager - > _transactions . term ( ) ; <nl> - <nl> - bucket = & ( _table [ getIndex ( hash , false ) ] ) ; <nl> - ok = bucket - > lock ( term , maxTries ) ; <nl> - if ( ok & & <nl> - bucket - > isMigrated ( ) ) { / / get bucket from auxiliary table instead <nl> - bucket - > unlock ( ) ; <nl> - bucket = & ( _auxiliaryTable [ getIndex ( hash , true ) ] ) ; <nl> - ok = bucket - > lock ( term , maxTries ) ; <nl> - if ( ok & & bucket - > isMigrated ( ) ) { <nl> - ok = false ; <nl> - bucket - > unlock ( ) ; <nl> - } <nl> + auto pair = _table - > fetchAndLockBucket ( hash , maxTries ) ; <nl> + bucket = reinterpret_cast < TransactionalBucket * > ( pair . first ) ; <nl> + source = pair . second ; <nl> + ok = ( bucket ! = nullptr ) ; <nl> + if ( ok ) { <nl> + bucket - > updateBlacklistTerm ( term ) ; <nl> } <nl> } <nl> if ( ! ok & & started ) { <nl> std : : pair < bool , TransactionalBucket * > TransactionalCache : : getBucket ( <nl> _state . 
unlock ( ) ; <nl> } <nl> <nl> - return std : : pair < bool , TransactionalBucket * > ( ok , bucket ) ; <nl> + return std : : make_tuple ( ok , bucket , source ) ; <nl> } <nl> <nl> - void TransactionalCache : : clearTable ( TransactionalBucket * table , <nl> - uint64_t tableSize ) { <nl> - for ( uint64_t i = 0 ; i < tableSize ; i + + ) { <nl> - TransactionalBucket * bucket = & ( table [ i ] ) ; <nl> - bucket - > lock ( 0 , - 1LL ) ; / / term doesn ' t actually matter here <nl> - for ( size_t j = 0 ; j < TransactionalBucket : : SLOTS_DATA ; j + + ) { <nl> + Table : : BucketClearer TransactionalCache : : bucketClearer ( Metadata * metadata ) { <nl> + return [ metadata ] ( void * ptr ) - > void { <nl> + auto bucket = reinterpret_cast < TransactionalBucket * > ( ptr ) ; <nl> + bucket - > lock ( Cache : : triesGuarantee ) ; <nl> + for ( size_t j = 0 ; j < TransactionalBucket : : slotsData ; j + + ) { <nl> if ( bucket - > _cachedData [ j ] ! = nullptr ) { <nl> uint64_t size = bucket - > _cachedData [ j ] - > size ( ) ; <nl> freeValue ( bucket - > _cachedData [ j ] ) ; <nl> - reclaimMemory ( size ) ; <nl> + metadata - > lock ( ) ; <nl> + metadata - > adjustUsageIfAllowed ( - static_cast < int64_t > ( size ) ) ; <nl> + metadata - > unlock ( ) ; <nl> } <nl> } <nl> bucket - > clear ( ) ; <nl> - } <nl> - } <nl> - <nl> - uint32_t TransactionalCache : : getIndex ( uint32_t hash , bool useAuxiliary ) const { <nl> - if ( useAuxiliary ) { <nl> - return ( ( hash & _auxiliaryBucketMask ) > > _auxiliaryMaskShift ) ; <nl> - } <nl> - <nl> - return ( ( hash & _bucketMask ) > > _maskShift ) ; <nl> + } ; <nl> } <nl> mmm a / arangod / Cache / TransactionalCache . h <nl> ppp b / arangod / Cache / TransactionalCache . h <nl> <nl> # include " Basics / Common . h " <nl> # include " Cache / Cache . h " <nl> # include " Cache / CachedValue . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / FrequencyBuffer . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / ManagerTasks . h " <nl> # include " Cache / Metadata . h " <nl> # include " Cache / State . h " <nl> + # include " Cache / Table . h " <nl> # include " Cache / TransactionalBucket . h " <nl> <nl> # include < stdint . 
h > <nl> namespace cache { <nl> class TransactionalCache final : public Cache { <nl> public : <nl> TransactionalCache ( Cache : : ConstructionGuard guard , Manager * manager , <nl> - Manager : : MetadataItr metadata , bool allowGrowth , <nl> + Metadata metadata , std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) ; <nl> ~ TransactionalCache ( ) ; <nl> <nl> class TransactionalCache final : public Cache { <nl> bool blacklist ( void const * key , uint32_t keySize ) ; <nl> <nl> private : <nl> - / / main table info <nl> - TransactionalBucket * _table ; <nl> - uint32_t _logSize ; <nl> - uint64_t _tableSize ; <nl> - uint32_t _maskShift ; <nl> - uint32_t _bucketMask ; <nl> - <nl> - / / auxiliary table info <nl> - TransactionalBucket * _auxiliaryTable ; <nl> - uint32_t _auxiliaryLogSize ; <nl> - uint64_t _auxiliaryTableSize ; <nl> - uint32_t _auxiliaryMaskShift ; <nl> - uint32_t _auxiliaryBucketMask ; <nl> - <nl> / / friend class manager and tasks <nl> friend class FreeMemoryTask ; <nl> friend class Manager ; <nl> class TransactionalCache final : public Cache { <nl> <nl> private : <nl> static uint64_t allocationSize ( bool enableWindowedStats ) ; <nl> - static std : : shared_ptr < Cache > create ( Manager * manager , <nl> - Manager : : MetadataItr metadata , <nl> - bool allowGrowth , <nl> + static std : : shared_ptr < Cache > create ( Manager * manager , Metadata metadata , <nl> + std : : shared_ptr < Table > table , <nl> bool enableWindowedStats ) ; <nl> - / / management <nl> - bool freeMemory ( ) ; <nl> - bool migrate ( ) ; <nl> - void clearTables ( ) ; <nl> + <nl> + virtual uint64_t freeMemoryFrom ( uint32_t hash ) ; <nl> + virtual void migrateBucket ( void * sourcePtr , <nl> + std : : unique_ptr < Table : : Subtable > targets , <nl> + std : : shared_ptr < Table > newTable ) ; <nl> <nl> / / helpers <nl> - std : : pair < bool , TransactionalBucket * > getBucket ( uint32_t hash , <nl> - int64_t maxTries , <nl> - bool singleOperation = true ) ; <nl> - void clearTable ( TransactionalBucket * table , uint64_t tableSize ) ; <nl> + std : : tuple < bool , TransactionalBucket * , std : : shared_ptr < Table > > getBucket ( <nl> + uint32_t hash , int64_t maxTries , bool singleOperation = true ) ; <nl> uint32_t getIndex ( uint32_t hash , bool useAuxiliary ) const ; <nl> + <nl> + static Table : : BucketClearer bucketClearer ( Metadata * metadata ) ; <nl> } ; <nl> <nl> } ; / / end namespace cache <nl> mmm a / tests / CMakeLists . txt <nl> ppp b / tests / CMakeLists . txt <nl> add_executable ( <nl> Cache / Metadata . cpp <nl> Cache / MockScheduler . cpp <nl> Cache / PlainBucket . cpp <nl> + Cache / PlainCache . cpp <nl> Cache / Rebalancer . cpp <nl> Cache / State . cpp <nl> + Cache / Table . cpp <nl> Cache / TransactionalBucket . cpp <nl> Cache / TransactionalCache . cpp <nl> Cache / TransactionalStore . cpp <nl> mmm a / tests / Cache / CachedValue . cpp <nl> ppp b / tests / Cache / CachedValue . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . 
<nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / CachedValue . h " <nl> # include " Basics / Common . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / CachedValue . h " <nl> - <nl> # include < stdint . h > <nl> # include < string > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - TEST_CASE ( " CCacheCachedValueTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test construct with valid data <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_construct_valid " ) { <nl> - uint64_t k = 1 ; <nl> - std : : string v ( " test " ) ; <nl> - CachedValue * cv ; <nl> - <nl> - / / fixed key , variable value <nl> - cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> - CHECK ( nullptr ! = cv ) ; <nl> - CHECK ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> - CHECK ( v . size ( ) = = cv - > valueSize ) ; <nl> - CHECK ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = <nl> - cv - > size ( ) ) ; <nl> - CHECK ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> - CHECK ( 0 = = memcmp ( v . data ( ) , cv - > value ( ) , v . size ( ) ) ) ; <nl> - delete cv ; <nl> - <nl> - / / variable key , fixed value <nl> - cv = CachedValue : : construct ( v . data ( ) , v . size ( ) , & k , sizeof ( uint64_t ) ) ; <nl> - CHECK ( nullptr ! = cv ) ; <nl> - CHECK ( v . size ( ) = = cv - > keySize ) ; <nl> - CHECK ( sizeof ( uint64_t ) = = cv - > valueSize ) ; <nl> - CHECK ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = <nl> - cv - > size ( ) ) ; <nl> - CHECK ( 0 = = memcmp ( v . data ( ) , cv - > key ( ) , v . size ( ) ) ) ; <nl> - CHECK ( k = = * reinterpret_cast < uint64_t const * > ( cv - > value ( ) ) ) ; <nl> - delete cv ; <nl> - <nl> - / / fixed key , zero length value <nl> - cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , 0 ) ; <nl> - CHECK ( nullptr ! 
= cv ) ; <nl> - CHECK ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> - CHECK ( 0ULL = = cv - > valueSize ) ; <nl> - CHECK ( sizeof ( CachedValue ) + sizeof ( uint64_t ) = = cv - > size ( ) ) ; <nl> - CHECK ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> - CHECK ( nullptr = = cv - > value ( ) ) ; <nl> - delete cv ; <nl> + TEST_CASE ( " cache : : CachedValue " , " [ cache ] " ) { <nl> + SECTION ( " test constructor with valid input " ) { <nl> + uint64_t k = 1 ; <nl> + std : : string v ( " test " ) ; <nl> + CachedValue * cv ; <nl> + <nl> + / / fixed key , variable value <nl> + cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr ! = cv ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> + REQUIRE ( v . size ( ) = = cv - > valueSize ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> + REQUIRE ( 0 = = memcmp ( v . data ( ) , cv - > value ( ) , v . size ( ) ) ) ; <nl> + delete cv ; <nl> + <nl> + / / variable key , fixed value <nl> + cv = CachedValue : : construct ( v . data ( ) , v . size ( ) , & k , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( nullptr ! = cv ) ; <nl> + REQUIRE ( v . size ( ) = = cv - > keySize ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > valueSize ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( 0 = = memcmp ( v . data ( ) , cv - > key ( ) , v . size ( ) ) ) ; <nl> + REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > value ( ) ) ) ; <nl> + delete cv ; <nl> + <nl> + / / fixed key , zero length value <nl> + cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , 0 ) ; <nl> + REQUIRE ( nullptr ! = cv ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = cv - > keySize ) ; <nl> + REQUIRE ( 0ULL = = cv - > valueSize ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) = = cv - > size ( ) ) ; <nl> + REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( cv - > key ( ) ) ) ; <nl> + REQUIRE ( nullptr = = cv - > value ( ) ) ; <nl> + delete cv ; <nl> + } <nl> + <nl> + SECTION ( " test that constructor rejects invalid data " ) { <nl> + uint64_t k = 1 ; <nl> + std : : string v ( " test " ) ; <nl> + CachedValue * cv ; <nl> + <nl> + / / zero size key <nl> + cv = CachedValue : : construct ( & k , 0 , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> + <nl> + / / nullptr key , zero size <nl> + cv = CachedValue : : construct ( nullptr , 0 , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> + <nl> + / / nullptr key , non - zero size <nl> + cv = CachedValue : : construct ( nullptr , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> + <nl> + / / nullptr value , non - zero length <nl> + cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , v . size ( ) ) ; <nl> + REQUIRE ( nullptr = = cv ) ; <nl> + } <nl> + <nl> + SECTION ( " copy ( ) should produce a correct copy " ) { <nl> + uint64_t k = 1 ; <nl> + std : : string v ( " test " ) ; <nl> + <nl> + / / fixed key , variable value <nl> + auto original = <nl> + CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> + auto copy = original - > copy ( ) ; <nl> + REQUIRE ( nullptr ! = copy ) ; <nl> + REQUIRE ( copy ! 
= original ) ; <nl> + REQUIRE ( sizeof ( uint64_t ) = = copy - > keySize ) ; <nl> + REQUIRE ( v . size ( ) = = copy - > valueSize ) ; <nl> + REQUIRE ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = copy - > size ( ) ) ; <nl> + REQUIRE ( k = = * reinterpret_cast < uint64_t const * > ( copy - > key ( ) ) ) ; <nl> + REQUIRE ( 0 = = memcmp ( v . data ( ) , copy - > value ( ) , v . size ( ) ) ) ; <nl> + delete original ; <nl> + delete copy ; <nl> + } <nl> + <nl> + SECTION ( " sameKey ( ) method for key comparisons works " ) { <nl> + std : : string k1 ( " test " ) ; <nl> + std : : string k2 ( " testing " ) ; <nl> + std : : string k3 ( " TEST " ) ; <nl> + uint64_t v = 1 ; <nl> + <nl> + auto cv = <nl> + CachedValue : : construct ( k1 . data ( ) , k1 . size ( ) , & v , sizeof ( uint64_t ) ) ; <nl> + <nl> + / / same key <nl> + REQUIRE ( cv - > sameKey ( k1 . data ( ) , k1 . size ( ) ) ) ; <nl> + <nl> + / / different length , matching prefix <nl> + REQUIRE ( ! cv - > sameKey ( k2 . data ( ) , k2 . size ( ) ) ) ; <nl> + <nl> + / / same length , different key <nl> + REQUIRE ( ! cv - > sameKey ( k3 . data ( ) , k3 . size ( ) ) ) ; <nl> + <nl> + delete cv ; <nl> + } <nl> } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test construct with invalid data <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_construct_invalid " ) { <nl> - uint64_t k = 1 ; <nl> - std : : string v ( " test " ) ; <nl> - CachedValue * cv ; <nl> - <nl> - / / zero size key <nl> - cv = CachedValue : : construct ( & k , 0 , v . data ( ) , v . size ( ) ) ; <nl> - CHECK ( nullptr = = cv ) ; <nl> - <nl> - / / nullptr key , zero size <nl> - cv = CachedValue : : construct ( nullptr , 0 , v . data ( ) , v . size ( ) ) ; <nl> - CHECK ( nullptr = = cv ) ; <nl> - <nl> - / / nullptr key , non - zero size <nl> - cv = CachedValue : : construct ( nullptr , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> - CHECK ( nullptr = = cv ) ; <nl> - <nl> - / / nullptr value , non - zero length <nl> - cv = CachedValue : : construct ( & k , sizeof ( uint64_t ) , nullptr , v . size ( ) ) ; <nl> - CHECK ( nullptr = = cv ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test copy <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_copy " ) { <nl> - uint64_t k = 1 ; <nl> - std : : string v ( " test " ) ; <nl> - <nl> - / / fixed key , variable value <nl> - auto original = <nl> - CachedValue : : construct ( & k , sizeof ( uint64_t ) , v . data ( ) , v . size ( ) ) ; <nl> - auto copy = original - > copy ( ) ; <nl> - CHECK ( nullptr ! = copy ) ; <nl> - CHECK ( sizeof ( uint64_t ) = = copy - > keySize ) ; <nl> - CHECK ( v . size ( ) = = copy - > valueSize ) ; <nl> - CHECK ( sizeof ( CachedValue ) + sizeof ( uint64_t ) + v . size ( ) = = <nl> - copy - > size ( ) ) ; <nl> - CHECK ( k = = * reinterpret_cast < uint64_t const * > ( copy - > key ( ) ) ) ; <nl> - CHECK ( 0 = = memcmp ( v . data ( ) , copy - > value ( ) , v . 
size ( ) ) ) ; <nl> - delete original ; <nl> - delete copy ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test key comparison <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_key_comparison " ) { <nl> - std : : string k1 ( " test " ) ; <nl> - std : : string k2 ( " testing " ) ; <nl> - std : : string k3 ( " TEST " ) ; <nl> - uint64_t v = 1 ; <nl> - <nl> - auto cv = CachedValue : : construct ( k1 . data ( ) , k1 . size ( ) , & v , sizeof ( uint64_t ) ) ; <nl> - <nl> - / / same key <nl> - CHECK ( cv - > sameKey ( k1 . data ( ) , k1 . size ( ) ) ) ; <nl> - <nl> - / / different length , matching prefix <nl> - CHECK ( ! cv - > sameKey ( k2 . data ( ) , k2 . size ( ) ) ) ; <nl> - <nl> - / / same length , different key <nl> - CHECK ( ! cv - > sameKey ( k3 . data ( ) , k3 . size ( ) ) ) ; <nl> - <nl> - delete cv ; <nl> - } <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / FrequencyBuffer . cpp <nl> ppp b / tests / Cache / FrequencyBuffer . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / FrequencyBuffer . h " <nl> # include " Basics / Common . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / FrequencyBuffer . h " <nl> - <nl> # include < stdint . 
h > <nl> # include < memory > <nl> <nl> - # include < iostream > <nl> - <nl> using namespace arangodb : : cache ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheFrequencyBufferTest " , " [ cache ] " ) { <nl> + TEST_CASE ( " cache : : FrequencyBuffer " , " [ cache ] " ) { <nl> + SECTION ( " test buffer with uint8_t entries " ) { <nl> + uint8_t zero = 0 ; <nl> + uint8_t one = 1 ; <nl> + uint8_t two = 2 ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test behavior with ints <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / check that default construction is as expected <nl> + REQUIRE ( uint8_t ( ) = = zero ) ; <nl> <nl> - SECTION ( " tst_uint8_t " ) { <nl> - uint8_t zero = 0 ; <nl> - uint8_t one = 1 ; <nl> - uint8_t two = 2 ; <nl> + FrequencyBuffer < uint8_t > buffer ( 8 ) ; <nl> + REQUIRE ( buffer . memoryUsage ( ) = = <nl> + sizeof ( FrequencyBuffer < uint8_t > ) + sizeof ( std : : vector < uint8_t > ) + <nl> + 8 ) ; <nl> <nl> - / / check that default construction is as expected <nl> - CHECK ( uint8_t ( ) = = zero ) ; <nl> - <nl> - FrequencyBuffer < uint8_t > buffer ( 8 ) ; <nl> - CHECK ( buffer . memoryUsage ( ) = = <nl> - sizeof ( FrequencyBuffer < uint8_t > ) + sizeof ( std : : vector < uint8_t > ) + 8 ) ; <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + buffer . insertRecord ( two ) ; <nl> + } <nl> + for ( size_t i = 0 ; i < 2 ; i + + ) { <nl> + buffer . insertRecord ( one ) ; <nl> + } <nl> <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - buffer . insertRecord ( two ) ; <nl> - } <nl> - for ( size_t i = 0 ; i < 2 ; i + + ) { <nl> - buffer . insertRecord ( one ) ; <nl> - } <nl> + auto frequencies = buffer . getFrequencies ( ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 2 ) = = frequencies - > size ( ) ) ; <nl> + REQUIRE ( one = = ( * frequencies ) [ 0 ] . first ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 2 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> + REQUIRE ( two = = ( * frequencies ) [ 1 ] . first ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 4 ) = = ( * frequencies ) [ 1 ] . second ) ; <nl> <nl> - auto frequencies = buffer . getFrequencies ( ) ; <nl> - CHECK ( static_cast < uint64_t > ( 2 ) = = frequencies - > size ( ) ) ; <nl> - CHECK ( one = = ( * frequencies ) [ 0 ] . first ) ; <nl> - CHECK ( static_cast < uint64_t > ( 2 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> - CHECK ( two = = ( * frequencies ) [ 1 ] . first ) ; <nl> - CHECK ( static_cast < uint64_t > ( 4 ) = = ( * frequencies ) [ 1 ] . second ) ; <nl> + for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> + buffer . insertRecord ( one ) ; <nl> + } <nl> <nl> - for ( size_t i = 0 ; i < 8 ; i + + ) { <nl> - buffer . insertRecord ( one ) ; <nl> + frequencies = buffer . getFrequencies ( ) ; <nl> + REQUIRE ( static_cast < size_t > ( 1 ) = = frequencies - > size ( ) ) ; <nl> + REQUIRE ( one = = ( * frequencies ) [ 0 ] . 
first ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 8 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> } <nl> <nl> - frequencies = buffer . getFrequencies ( ) ; <nl> - CHECK ( static_cast < size_t > ( 1 ) = = frequencies - > size ( ) ) ; <nl> - CHECK ( one = = ( * frequencies ) [ 0 ] . first ) ; <nl> - CHECK ( static_cast < uint64_t > ( 8 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test behavior with shared_ptr <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_pointers " ) { <nl> - struct cmp_weak_ptr { <nl> - bool operator ( ) ( std : : weak_ptr < int > const & left , <nl> - std : : weak_ptr < int > const & right ) const { <nl> - return ! left . owner_before ( right ) & & ! right . owner_before ( left ) ; <nl> + SECTION ( " test buffer with weak_ptr entries " ) { <nl> + struct cmp_weak_ptr { <nl> + bool operator ( ) ( std : : weak_ptr < int > const & left , <nl> + std : : weak_ptr < int > const & right ) const { <nl> + return ! left . owner_before ( right ) & & ! right . owner_before ( left ) ; <nl> + } <nl> + } ; <nl> + <nl> + struct hash_weak_ptr { <nl> + size_t operator ( ) ( std : : weak_ptr < int > const & wp ) const { <nl> + auto sp = wp . lock ( ) ; <nl> + return std : : hash < decltype ( sp ) > ( ) ( sp ) ; <nl> + } <nl> + } ; <nl> + <nl> + typedef FrequencyBuffer < std : : weak_ptr < int > , cmp_weak_ptr , hash_weak_ptr > <nl> + BufferType ; <nl> + <nl> + std : : shared_ptr < int > p0 ( nullptr ) ; <nl> + <nl> + / / check that default construction is as expected <nl> + REQUIRE ( std : : shared_ptr < int > ( ) = = p0 ) ; <nl> + <nl> + std : : shared_ptr < int > p1 ( new int ( ) ) ; <nl> + * p1 = static_cast < int > ( 1 ) ; <nl> + std : : shared_ptr < int > p2 ( new int ( ) ) ; <nl> + * p2 = static_cast < int > ( 2 ) ; <nl> + <nl> + BufferType buffer ( 8 ) ; <nl> + REQUIRE ( buffer . memoryUsage ( ) = = <nl> + sizeof ( BufferType ) + sizeof ( std : : vector < std : : weak_ptr < int > > ) + <nl> + ( 8 * sizeof ( std : : weak_ptr < int > ) ) ) ; <nl> + <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + buffer . insertRecord ( p1 ) ; <nl> } <nl> - } ; <nl> - <nl> - struct hash_weak_ptr { <nl> - size_t operator ( ) ( std : : weak_ptr < int > const & wp ) const { <nl> - auto sp = wp . lock ( ) ; <nl> - return std : : hash < decltype ( sp ) > ( ) ( sp ) ; <nl> + for ( size_t i = 0 ; i < 2 ; i + + ) { <nl> + buffer . insertRecord ( p2 ) ; <nl> } <nl> - } ; <nl> - <nl> - typedef FrequencyBuffer < std : : weak_ptr < int > , cmp_weak_ptr , hash_weak_ptr > <nl> - BufferType ; <nl> - <nl> - std : : shared_ptr < int > p0 ( nullptr ) ; <nl> - <nl> - / / check that default construction is as expected <nl> - CHECK ( std : : shared_ptr < int > ( ) = = p0 ) ; <nl> - <nl> - std : : shared_ptr < int > p1 ( new int ( ) ) ; <nl> - * p1 = static_cast < int > ( 1 ) ; <nl> - std : : shared_ptr < int > p2 ( new int ( ) ) ; <nl> - * p2 = static_cast < int > ( 2 ) ; <nl> - <nl> - BufferType buffer ( 8 ) ; <nl> - CHECK ( buffer . memoryUsage ( ) = = <nl> - sizeof ( BufferType ) + <nl> - sizeof ( std : : vector < std : : weak_ptr < int > > ) + <nl> - ( 8 * sizeof ( std : : weak_ptr < int > ) ) ) ; <nl> <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - buffer . 
insertRecord ( p1 ) ; <nl> + auto frequencies = buffer . getFrequencies ( ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 2 ) = = frequencies - > size ( ) ) ; <nl> + REQUIRE ( p2 = = ( * frequencies ) [ 0 ] . first . lock ( ) ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 2 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> + REQUIRE ( p1 = = ( * frequencies ) [ 1 ] . first . lock ( ) ) ; <nl> + REQUIRE ( static_cast < uint64_t > ( 4 ) = = ( * frequencies ) [ 1 ] . second ) ; <nl> } <nl> - for ( size_t i = 0 ; i < 2 ; i + + ) { <nl> - buffer . insertRecord ( p2 ) ; <nl> - } <nl> - <nl> - auto frequencies = buffer . getFrequencies ( ) ; <nl> - CHECK ( static_cast < uint64_t > ( 2 ) = = frequencies - > size ( ) ) ; <nl> - CHECK ( p2 = = ( * frequencies ) [ 0 ] . first . lock ( ) ) ; <nl> - CHECK ( static_cast < uint64_t > ( 2 ) = = ( * frequencies ) [ 0 ] . second ) ; <nl> - CHECK ( p1 = = ( * frequencies ) [ 1 ] . first . lock ( ) ) ; <nl> - CHECK ( static_cast < uint64_t > ( 4 ) = = ( * frequencies ) [ 1 ] . second ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / Manager . cpp <nl> ppp b / tests / Cache / Manager . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / Manager . h " <nl> # include " Basics / Common . h " <nl> - # include " Random / RandomGenerator . h " <nl> - <nl> - # include " catch . hpp " <nl> - <nl> # include " Cache / CacheManagerFeatureThreads . h " <nl> - # include " Cache / Manager . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / PlainCache . h " <nl> + # include " Random / RandomGenerator . h " <nl> <nl> # include " MockScheduler . h " <nl> + # include " catch . hpp " <nl> <nl> # include < stdint . h > <nl> # include < queue > <nl> <nl> # include < thread > <nl> # include < vector > <nl> <nl> - # include < iostream > <nl> - <nl> using namespace arangodb ; <nl> using namespace arangodb : : cache ; <nl> <nl> - TEST_CASE ( " CCacheManagerTest " , " [ cache ] [ ! 
hide ] [ longRunning ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test constructor with valid data <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TEST_CASE ( " cache : : Manager " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> + SECTION ( " test basic constructor function " ) { <nl> + uint64_t requestLimit = 1024 * 1024 ; <nl> + Manager manager ( nullptr , requestLimit ) ; <nl> <nl> - SECTION ( " tst_constructor " ) { <nl> - uint64_t requestLimit = 1024 * 1024 ; <nl> - Manager manager ( nullptr , requestLimit ) ; <nl> + REQUIRE ( requestLimit = = manager . globalLimit ( ) ) ; <nl> <nl> - CHECK ( requestLimit = = manager . globalLimit ( ) ) ; <nl> + REQUIRE ( 0ULL < manager . globalAllocation ( ) ) ; <nl> + REQUIRE ( requestLimit > manager . globalAllocation ( ) ) ; <nl> <nl> - CHECK ( 0ULL < manager . globalAllocation ( ) ) ; <nl> - CHECK ( requestLimit > manager . globalAllocation ( ) ) ; <nl> + uint64_t bigRequestLimit = 4ULL * 1024ULL * 1024ULL * 1024ULL ; <nl> + Manager bigManager ( nullptr , bigRequestLimit ) ; <nl> <nl> - uint64_t bigRequestLimit = 4ULL * 1024ULL * 1024ULL * 1024ULL ; <nl> - Manager bigManager ( nullptr , bigRequestLimit ) ; <nl> - <nl> - CHECK ( bigRequestLimit = = bigManager . globalLimit ( ) ) ; <nl> - <nl> - CHECK ( ( 1024ULL * 1024ULL ) < bigManager . globalAllocation ( ) ) ; <nl> - CHECK ( bigRequestLimit > bigManager . globalAllocation ( ) ) ; <nl> - } <nl> + REQUIRE ( bigRequestLimit = = bigManager . globalLimit ( ) ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test mixed load behavior ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_mixed_load " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - size_t cacheCount = 4 ; <nl> - size_t threadCount = 4 ; <nl> - std : : vector < std : : shared_ptr < Cache > > caches ; <nl> - for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> - auto res = <nl> - / * manager . createCache ( ( ( i % 2 = = 0 ) ? Manager : : CacheType : : Plain <nl> - : Manager : : CacheType : : Transactional ) , <nl> - initialSize , true ) ; * / <nl> - manager . createCache ( Manager : : CacheType : : Plain , initialSize , true ) ; <nl> - TRI_ASSERT ( res ) ; <nl> - caches . emplace_back ( res ) ; <nl> + REQUIRE ( ( 1024ULL * 1024ULL ) < bigManager . globalAllocation ( ) ) ; <nl> + REQUIRE ( bigRequestLimit > bigManager . 
globalAllocation ( ) ) ; <nl> } <nl> <nl> - uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> - uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> - uint64_t operationCount = 4 * 1024 * 1024 ; <nl> - std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> - std : : atomic < uint64_t > missCount ( 0 ) ; <nl> - auto worker = [ & manager , & caches , cacheCount , initialInserts , operationCount , <nl> - & hitCount , <nl> - & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> - / / fill with some initial data <nl> - for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> - uint64_t item = lower + i ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> - & item , sizeof ( uint64_t ) ) ; <nl> - bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> - if ( ! ok ) { <nl> - delete value ; <nl> - } <nl> + SECTION ( " test mixed cache types under mixed load " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> + size_t cacheCount = 4 ; <nl> + size_t threadCount = 4 ; <nl> + std : : vector < std : : shared_ptr < Cache > > caches ; <nl> + for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> + auto res = manager . createCache ( <nl> + ( ( i % 2 = = 0 ) ? CacheType : : Plain : CacheType : : Transactional ) ) ; <nl> + TRI_ASSERT ( res ) ; <nl> + caches . emplace_back ( res ) ; <nl> } <nl> <nl> - / / initialize valid range for keys that * might * be in cache <nl> - uint64_t validLower = lower ; <nl> - uint64_t validUpper = lower + initialInserts - 1 ; <nl> - <nl> - / / commence mixed workload <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> - <nl> - if ( r > = 99 ) { / / remove something <nl> - if ( validLower = = validUpper ) { <nl> - continue ; / / removed too much <nl> - } <nl> - <nl> - uint64_t item = validLower + + ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - <nl> - caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> - } else if ( r > = 95 ) { / / insert something <nl> - if ( validUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> - <nl> - uint64_t item = + + validUpper ; <nl> + uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> + uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> + uint64_t operationCount = 4 * 1024 * 1024 ; <nl> + std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> + std : : atomic < uint64_t > missCount ( 0 ) ; <nl> + auto worker = [ & manager , & caches , cacheCount , initialInserts , <nl> + operationCount , & hitCount , <nl> + & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> + / / fill with some initial data <nl> + for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> + uint64_t item = lower + i ; <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> SECTION ( " tst_mixed_load " ) { <nl> if ( ! 
ok ) { <nl> delete value ; <nl> } <nl> - } else { / / lookup something <nl> - uint64_t item = RandomGenerator : : interval ( <nl> - static_cast < int64_t > ( validLower ) , static_cast < int64_t > ( validUpper ) ) ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - <nl> - Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> - if ( f . found ( ) ) { <nl> - hitCount + + ; <nl> - TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> - TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> - } else { <nl> - missCount + + ; <nl> - TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> - } <nl> } <nl> - } <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - uint64_t lower = i * chunkSize ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> - threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> - } <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> + / / initialize valid range for keys that * might * be in cache <nl> + uint64_t validLower = lower ; <nl> + uint64_t validUpper = lower + initialInserts - 1 ; <nl> <nl> - for ( auto cache : caches ) { <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + / / commence mixed workload <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99 ) ) ; <nl> <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> + if ( r > = 99 ) { / / remove something <nl> + if ( validLower = = validUpper ) { <nl> + continue ; / / removed too much <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test creation / destruction chaos ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + uint64_t item = validLower + + ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> <nl> - SECTION ( " tst_lifecycle_chaos " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - size_t threadCount = 4 ; <nl> - uint64_t operationCount = 4ULL * 1024ULL ; <nl> - <nl> - auto worker = [ & manager , initialSize , operationCount ] ( ) - > void { <nl> - std : : queue < std : : shared_ptr < Cache > > caches ; <nl> - <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 1 ) ) ; <nl> - switch ( r ) { <nl> - case 0 : { <nl> - auto res = manager . createCache ( <nl> - ( i % 2 = = 0 ) ? Manager : : CacheType : : Plain <nl> - : Manager : : CacheType : : Transactional , <nl> - initialSize , true ) ; <nl> - if ( res ) { <nl> - caches . emplace ( res ) ; <nl> + caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> + } else if ( r > = 95 ) { / / insert something <nl> + if ( validUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> } <nl> - } <nl> - case 1 : <nl> - default : { <nl> - if ( ! caches . empty ( ) ) { <nl> - auto cache = caches . 
front ( ) ; <nl> - caches . pop ( ) ; <nl> - manager . destroyCache ( cache ) ; <nl> + <nl> + uint64_t item = + + validUpper ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> + & item , sizeof ( uint64_t ) ) ; <nl> + bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> + if ( ! ok ) { <nl> + delete value ; <nl> + } <nl> + } else { / / lookup something <nl> + uint64_t item = <nl> + RandomGenerator : : interval ( static_cast < int64_t > ( validLower ) , <nl> + static_cast < int64_t > ( validUpper ) ) ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + <nl> + Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + hitCount + + ; <nl> + TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> + TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + missCount + + ; <nl> + TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> } <nl> } <nl> } <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + uint64_t lower = i * chunkSize ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> + threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> } <nl> - } ; <nl> <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - threads . push_back ( new std : : thread ( worker ) ) ; <nl> - } <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> + for ( auto cache : caches ) { <nl> + manager . destroyCache ( cache ) ; <nl> + } <nl> + <nl> + RandomGenerator : : shutdown ( ) ; <nl> } <nl> <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> + SECTION ( " test manager under cache lifecycle chaos " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> + size_t threadCount = 4 ; <nl> + uint64_t operationCount = 4ULL * 1024ULL ; <nl> + <nl> + auto worker = [ & manager , operationCount ] ( ) - > void { <nl> + std : : queue < std : : shared_ptr < Cache > > caches ; <nl> + <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 1 ) ) ; <nl> + switch ( r ) { <nl> + case 0 : { <nl> + auto res = manager . createCache ( <nl> + ( i % 2 = = 0 ) ? CacheType : : Plain : CacheType : : Transactional ) ; <nl> + if ( res ) { <nl> + caches . emplace ( res ) ; <nl> + } <nl> + } <nl> + case 1 : <nl> + default : { <nl> + if ( ! caches . empty ( ) ) { <nl> + auto cache = caches . front ( ) ; <nl> + caches . pop ( ) ; <nl> + manager . 
destroyCache ( cache ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( worker ) ) ; <nl> + } <nl> <nl> - } <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> + RandomGenerator : : shutdown ( ) ; <nl> + } <nl> + } <nl> mmm a / tests / Cache / Metadata . cpp <nl> ppp b / tests / Cache / Metadata . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / Metadata . h " <nl> # include " Basics / Common . h " <nl> + # include " Cache / PlainCache . h " <nl> + # include " Cache / Table . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / Metadata . h " <nl> - <nl> # include < stdint . 
h > <nl> # include < memory > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheMetadataTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test constructor with valid data <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_constructor " ) { <nl> - uint64_t limit = 1024 ; <nl> - Metadata metadata ( limit ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test getters <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_getters " ) { <nl> - uint64_t dummy ; <nl> - std : : shared_ptr < Cache > dummyCache ( reinterpret_cast < Cache * > ( & dummy ) , <nl> - [ ] ( Cache * p ) - > void { } ) ; <nl> - uint64_t limit = 1024 ; <nl> - <nl> - Metadata metadata ( limit ) ; <nl> - metadata . link ( dummyCache ) ; <nl> - <nl> - metadata . lock ( ) ; <nl> - <nl> - CHECK ( dummyCache = = metadata . cache ( ) ) ; <nl> - <nl> - CHECK ( limit = = metadata . softLimit ( ) ) ; <nl> - CHECK ( limit = = metadata . hardLimit ( ) ) ; <nl> - CHECK ( 0UL = = metadata . usage ( ) ) ; <nl> - <nl> - metadata . unlock ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test usage limits <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_usage_limits " ) { <nl> - bool success ; <nl> - <nl> - Metadata metadata ( 1024ULL ) ; <nl> - <nl> - metadata . lock ( ) ; <nl> - <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( ! success ) ; <nl> - <nl> - success = metadata . adjustLimits ( 2048ULL , 2048ULL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - success = metadata . adjustUsageIfAllowed ( 1024LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - success = metadata . adjustLimits ( 1024ULL , 2048ULL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( ! success ) ; <nl> - success = metadata . 
adjustUsageIfAllowed ( - 512LL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustUsageIfAllowed ( - 1024LL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustUsageIfAllowed ( 512LL ) ; <nl> - CHECK ( ! success ) ; <nl> - <nl> - success = metadata . adjustLimits ( 1024ULL , 1024ULL ) ; <nl> - CHECK ( success ) ; <nl> - success = metadata . adjustLimits ( 512ULL , 512ULL ) ; <nl> - CHECK ( ! success ) ; <nl> - <nl> - metadata . unlock ( ) ; <nl> + TEST_CASE ( " cache : : Metadata " , " [ cache ] " ) { <nl> + SECTION ( " test basic constructor " ) { <nl> + uint64_t usageLimit = 1024 ; <nl> + uint64_t fixed = 128 ; <nl> + uint64_t table = Table : : allocationSize ( Table : : minLogSize ) ; <nl> + uint64_t max = UINT64_MAX ; <nl> + Metadata metadata ( usageLimit , fixed , table , max ) ; <nl> + <nl> + REQUIRE ( metadata . fixedSize = = fixed ) ; <nl> + REQUIRE ( metadata . tableSize = = table ) ; <nl> + REQUIRE ( metadata . maxSize = = max ) ; <nl> + REQUIRE ( metadata . allocatedSize > ( usageLimit + fixed + table ) ) ; <nl> + REQUIRE ( metadata . deservedSize = = metadata . allocatedSize ) ; <nl> + <nl> + REQUIRE ( metadata . usage = = 0 ) ; <nl> + REQUIRE ( metadata . softUsageLimit = = usageLimit ) ; <nl> + REQUIRE ( metadata . hardUsageLimit = = usageLimit ) ; <nl> + } <nl> + <nl> + SECTION ( " verify usage limits are adjusted and enforced correctly " ) { <nl> + uint64_t overhead = 48 ; <nl> + Metadata metadata ( 1024 , 0 , 0 , 2048 + overhead ) ; <nl> + <nl> + metadata . lock ( ) ; <nl> + <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + REQUIRE ( ! metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + <nl> + REQUIRE ( ! metadata . adjustLimits ( 2048 , 2048 ) ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 1024 + overhead ) ; <nl> + REQUIRE ( metadata . adjustDeserved ( 2048 + overhead ) ) ; <nl> + REQUIRE ( metadata . adjustLimits ( 2048 , 2048 ) ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 2048 + overhead ) ; <nl> + <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( 1024 ) ) ; <nl> + <nl> + REQUIRE ( metadata . adjustLimits ( 1024 , 2048 ) ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 2048 + overhead ) ; <nl> + <nl> + REQUIRE ( ! metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( - 512 ) ) ; <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + REQUIRE ( metadata . adjustUsageIfAllowed ( - 1024 ) ) ; <nl> + REQUIRE ( ! metadata . adjustUsageIfAllowed ( 512 ) ) ; <nl> + <nl> + REQUIRE ( metadata . adjustLimits ( 1024 , 1024 ) ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 1024 + overhead ) ; <nl> + REQUIRE ( ! metadata . adjustLimits ( 512 , 512 ) ) ; <nl> + <nl> + REQUIRE ( ! metadata . adjustLimits ( 2049 , 2049 ) ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 1024 + overhead ) ; <nl> + <nl> + metadata . unlock ( ) ; <nl> + } <nl> + <nl> + SECTION ( " verify table methods work correctly " ) { <nl> + uint64_t overhead = 48 ; <nl> + Metadata metadata ( 1024 , 0 , 512 , 2048 + overhead ) ; <nl> + <nl> + metadata . lock ( ) ; <nl> + <nl> + REQUIRE ( ! metadata . migrationAllowed ( 1024 ) ) ; <nl> + REQUIRE ( 2048 + overhead = = metadata . adjustDeserved ( 2048 + overhead ) ) ; <nl> + <nl> + REQUIRE ( metadata . migrationAllowed ( 1024 ) ) ; <nl>
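+ / / migration to the larger table is now allowed , so record the new size <nl> + metadata . 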
changeTable ( 1024 ) ; <nl> + REQUIRE ( metadata . tableSize = = 1024 ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 2048 + overhead ) ; <nl> + <nl> + REQUIRE ( ! metadata . migrationAllowed ( 1025 ) ) ; <nl> + REQUIRE ( metadata . migrationAllowed ( 512 ) ) ; <nl> + metadata . changeTable ( 512 ) ; <nl> + REQUIRE ( metadata . tableSize = = 512 ) ; <nl> + REQUIRE ( metadata . allocatedSize = = 1536 + overhead ) ; <nl> + <nl> + metadata . unlock ( ) ; <nl> + } <nl> } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test migration methods <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_migration " ) { <nl> - uint8_t dummyTable ; <nl> - uint8_t dummyAuxiliaryTable ; <nl> - uint32_t logSize = 1 ; <nl> - uint32_t auxiliaryLogSize = 2 ; <nl> - uint64_t limit = 1024 ; <nl> - <nl> - Metadata metadata ( limit ) ; <nl> - <nl> - metadata . lock ( ) ; <nl> - <nl> - metadata . grantAuxiliaryTable ( & dummyTable , logSize ) ; <nl> - metadata . swapTables ( ) ; <nl> - <nl> - metadata . grantAuxiliaryTable ( & dummyAuxiliaryTable , auxiliaryLogSize ) ; <nl> - CHECK ( auxiliaryLogSize = = metadata . auxiliaryLogSize ( ) ) ; <nl> - CHECK ( & dummyAuxiliaryTable = = metadata . auxiliaryTable ( ) ) ; <nl> - <nl> - metadata . swapTables ( ) ; <nl> - CHECK ( logSize = = metadata . auxiliaryLogSize ( ) ) ; <nl> - CHECK ( auxiliaryLogSize = = metadata . logSize ( ) ) ; <nl> - CHECK ( & dummyTable = = metadata . auxiliaryTable ( ) ) ; <nl> - CHECK ( & dummyAuxiliaryTable = = metadata . table ( ) ) ; <nl> - <nl> - uint8_t * result = metadata . releaseAuxiliaryTable ( ) ; <nl> - CHECK ( 0UL = = metadata . auxiliaryLogSize ( ) ) ; <nl> - CHECK ( nullptr = = metadata . auxiliaryTable ( ) ) ; <nl> - CHECK ( result = = & dummyTable ) ; <nl> - <nl> - result = metadata . releaseTable ( ) ; <nl> - CHECK ( 0UL = = metadata . logSize ( ) ) ; <nl> - CHECK ( nullptr = = metadata . table ( ) ) ; <nl> - CHECK ( result = = & dummyAuxiliaryTable ) ; <nl> - <nl> - metadata . unlock ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / MockScheduler . cpp <nl> ppp b / tests / Cache / MockScheduler . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . 
<nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " MockScheduler . h " <nl> mmm a / tests / Cache / MockScheduler . h <nl> ppp b / tests / Cache / MockScheduler . h <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # ifndef UNITTESTS_CACHE_MOCK_SCHEDULER_H <nl> mmm a / tests / Cache / PlainBucket . cpp <nl> ppp b / tests / Cache / PlainBucket . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / PlainBucket . h " <nl> # include " Basics / Common . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / PlainBucket . h " <nl> - <nl> # include < stdint . 
h > <nl> # include < string > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCachePlainBucketTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test insertion to full and fail beyond <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_insertion " ) { <nl> - PlainBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 6 ] = { <nl> - 1 , 2 , 3 , <nl> - 4 , 5 , 6 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - CachedValue * ptrs [ 6 ] ; <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + TEST_CASE ( " cache : : PlainBucket " , " [ cache ] " ) { <nl> + SECTION ( " verify that insertion works correctly " ) { <nl> + PlainBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 6 ] = { <nl> + 1 , 2 , 3 , <nl> + 4 , 5 , 6 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + CachedValue * ptrs [ 6 ] ; <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - success = bucket . lock ( - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - / / insert five to fill <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - if ( i < 4 ) { <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - } else { <nl> - CHECK ( bucket . isFull ( ) ) ; <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> + <nl> + / / insert five to fill <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + if ( i < 4 ) { <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + } else { <nl> + REQUIRE ( bucket . isFull ( ) ) ; <nl> + } <nl> + } <nl> + for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> - } <nl> - for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> - CachedValue * res = bucket . 
find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> <nl> - / / check that insert is ignored if full <nl> - bucket . insert ( hashes [ 5 ] , ptrs [ 5 ] ) ; <nl> - CachedValue * res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> + / / check that insert is ignored if full <nl> + bucket . insert ( hashes [ 5 ] , ptrs [ 5 ] ) ; <nl> + CachedValue * res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> <nl> - bucket . unlock ( ) ; <nl> + bucket . unlock ( ) ; <nl> <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test removal <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_removal " ) { <nl> - PlainBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 3 ] = { <nl> - 1 , 2 , 3 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 3 ] = { 0 , 1 , 2 } ; <nl> - uint64_t values [ 3 ] = { 0 , 1 , 2 } ; <nl> - CachedValue * ptrs [ 3 ] ; <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + SECTION ( " verify removal works correctly " ) { <nl> + PlainBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 3 ] = { <nl> + 1 , 2 , 3 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 3 ] = { 0 , 1 , 2 } ; <nl> + uint64_t values [ 3 ] = { 0 , 1 , 2 } ; <nl> + CachedValue * ptrs [ 3 ] ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - success = bucket . lock ( - 1LL ) ; <nl> - CHECK ( success ) ; <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - } <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - CachedValue * res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> + } <nl> <nl> - CachedValue * res ; <nl> - res = bucket . remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 1 ] ) ; <nl> - res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - res = bucket . 
remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 0 ] ) ; <nl> - res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - res = bucket . remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 2 ] ) ; <nl> - res = bucket . find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - <nl> - bucket . unlock ( ) ; <nl> - <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> + CachedValue * res ; <nl> + res = bucket . remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 1 ] ) ; <nl> + res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + res = bucket . remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> + res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + res = bucket . remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 2 ] ) ; <nl> + res = bucket . find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + <nl> + bucket . unlock ( ) ; <nl> + <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test eviction with subsequent insertion <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_eviction " ) { <nl> - PlainBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 6 ] = { <nl> - 1 , 2 , 3 , <nl> - 4 , 5 , 6 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - CachedValue * ptrs [ 6 ] ; <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + SECTION ( " verify eviction works correctly " ) { <nl> + PlainBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 6 ] = { <nl> + 1 , 2 , 3 , <nl> + 4 , 5 , 6 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + CachedValue * ptrs [ 6 ] ; <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - success = bucket . lock ( - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - / / insert five to fill <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - if ( i < 4 ) { <nl> - CHECK ( ! bucket . 
isFull ( ) ) ; <nl> - } else { <nl> - CHECK ( bucket . isFull ( ) ) ; <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> + <nl> + / / insert five to fill <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + if ( i < 4 ) { <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + } else { <nl> + REQUIRE ( bucket . isFull ( ) ) ; <nl> + } <nl> + } <nl> + for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> - } <nl> - for ( size_t i = 0 ; i < 5 ; i + + ) { <nl> - CachedValue * res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> <nl> - / / check that we get proper eviction candidate <nl> - CachedValue * candidate = bucket . evictionCandidate ( ) ; <nl> - CHECK ( candidate = = ptrs [ 0 ] ) ; <nl> - bucket . evict ( candidate , false ) ; <nl> - CachedValue * res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - <nl> - / / check that we still find the right candidate if not full <nl> - candidate = bucket . evictionCandidate ( ) ; <nl> - CHECK ( candidate = = ptrs [ 1 ] ) ; <nl> - bucket . evict ( candidate , true ) ; <nl> - res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - <nl> - / / check that we can insert now after eviction optimized for insertion <nl> - bucket . insert ( hashes [ 5 ] , ptrs [ 5 ] ) ; <nl> - res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 5 ] ) ; <nl> - <nl> - bucket . unlock ( ) ; <nl> - <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> + / / check that we get proper eviction candidate <nl> + CachedValue * candidate = bucket . evictionCandidate ( ) ; <nl> + REQUIRE ( candidate = = ptrs [ 0 ] ) ; <nl> + bucket . evict ( candidate , false ) ; <nl> + CachedValue * res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + <nl> + / / check that we still find the right candidate if not full <nl> + candidate = bucket . evictionCandidate ( ) ; <nl> + REQUIRE ( candidate = = ptrs [ 1 ] ) ; <nl> + bucket . evict ( candidate , true ) ; <nl> + res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + <nl> + / / check that we can insert now after eviction optimized for insertion <nl> + bucket . insert ( hashes [ 5 ] , ptrs [ 5 ] ) ; <nl> + res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 5 ] ) ; <nl> + <nl> + bucket . 
unlock ( ) ; <nl> + <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> } <nl> } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / PlainCache . cpp <nl> ppp b / tests / Cache / PlainCache . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / PlainCache . h " <nl> # include " Basics / Common . h " <nl> - # include " Random / RandomGenerator . h " <nl> - <nl> - # define BOOST_TEST_INCLUDED <nl> - # include < boost / test / unit_test . hpp > <nl> - <nl> + # include " Cache / Common . h " <nl> # include " Cache / Manager . h " <nl> - # include " Cache / PlainCache . h " <nl> + # include " Random / RandomGenerator . h " <nl> <nl> # include " MockScheduler . h " <nl> + # include " catch . hpp " <nl> <nl> # include < stdint . 
h > <nl> # include < string > <nl> # include < thread > <nl> # include < vector > <nl> <nl> - # include < iostream > <nl> - <nl> using namespace arangodb ; <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - setup / tear - down <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - struct CCachePlainCacheSetup { <nl> - CCachePlainCacheSetup ( ) { BOOST_TEST_MESSAGE ( " setup PlainCache " ) ; } <nl> - <nl> - ~ CCachePlainCacheSetup ( ) { BOOST_TEST_MESSAGE ( " tear - down PlainCache " ) ; } <nl> - } ; <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - BOOST_FIXTURE_TEST_SUITE ( CCachePlainCacheTest , CCachePlainCacheSetup ) <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test construction ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - BOOST_AUTO_TEST_CASE ( tst_st_construction ) { <nl> - Manager manager ( nullptr , 1024ULL * 1024ULL ) ; <nl> - auto cache1 = <nl> - manager . createCache ( Manager : : CacheType : : Plain , 256ULL * 1024ULL , false ) ; <nl> - auto cache2 = <nl> - manager . createCache ( Manager : : CacheType : : Plain , 512ULL * 1024ULL , false ) ; <nl> - <nl> - BOOST_CHECK_EQUAL ( 0ULL , cache1 - > usage ( ) ) ; <nl> - BOOST_CHECK_EQUAL ( 256ULL * 1024ULL , cache1 - > limit ( ) ) ; <nl> - BOOST_CHECK_EQUAL ( 0ULL , cache2 - > usage ( ) ) ; <nl> - BOOST_CHECK ( 512ULL * 1024ULL > cache2 - > limit ( ) ) ; <nl> - <nl> - manager . destroyCache ( cache1 ) ; <nl> - manager . destroyCache ( cache2 ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test insertion ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + TEST_CASE ( " cache : : PlainCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> + SECTION ( " test basic cache creation " ) { <nl> + Manager manager ( nullptr , 1024 * 1024 ) ; <nl> + auto cache1 = manager . createCache ( CacheType : : Plain , false , 256 * 1024 ) ; <nl> + REQUIRE ( true ) ; <nl> + auto cache2 = manager . createCache ( CacheType : : Plain , false , 512 * 1024 ) ; <nl> <nl> - BOOST_AUTO_TEST_CASE ( tst_st_insertion ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cache = <nl> - manager . 
createCache ( Manager : : CacheType : : Plain , cacheLimit , false ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( f . found ( ) ) ; <nl> - } <nl> + REQUIRE ( 0 = = cache1 - > usage ( ) ) ; <nl> + REQUIRE ( 256 * 1024 > = cache1 - > size ( ) ) ; <nl> + REQUIRE ( 0 = = cache2 - > usage ( ) ) ; <nl> + REQUIRE ( 512 * 1024 > = cache2 - > size ( ) ) ; <nl> <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - uint64_t j = 2 * i ; <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( f . found ( ) ) ; <nl> - BOOST_CHECK ( 0 = = memcmp ( f . value ( ) - > value ( ) , & j , sizeof ( uint64_t ) ) ) ; <nl> + manager . destroyCache ( cache1 ) ; <nl> + manager . destroyCache ( cache2 ) ; <nl> } <nl> <nl> - uint64_t notInserted = 0 ; <nl> - for ( uint64_t i = 1024 ; i < 128 * 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( success ) { <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( f . found ( ) ) ; <nl> - } else { <nl> - delete value ; <nl> - notInserted + + ; <nl> + SECTION ( " check that insertion works as expected " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cache = manager . createCache ( CacheType : : Plain , false , cacheLimit ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> } <nl> - } <nl> - BOOST_CHECK ( notInserted > 0 ) ; <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + uint64_t j = 2 * i ; <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + REQUIRE ( 0 = = memcmp ( f . 
value ( ) - > value ( ) , & j , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test removal ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( uint64_t i = 1024 ; i < 256 * 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> + REQUIRE ( cache - > size ( ) < = 256 * 1024 ) ; <nl> <nl> - BOOST_AUTO_TEST_CASE ( tst_st_removal ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Plain , cacheLimit , false ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( f . found ( ) ) ; <nl> - BOOST_CHECK ( f . value ( ) ! = nullptr ) ; <nl> - BOOST_CHECK ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> + manager . destroyCache ( cache ) ; <nl> } <nl> <nl> - / / test removal of bogus keys <nl> - for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> - bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_ASSERT ( removed ) ; <nl> - / / ensure existing keys not removed <nl> + SECTION ( " test that removal works as expected " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cache = manager . createCache ( CacheType : : Plain , false , cacheLimit ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> + uint64_t inserted = 0 ; <nl> for ( uint64_t j = 0 ; j < 1024 ; j + + ) { <nl> auto f = cache - > find ( & j , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( f . found ( ) ) ; <nl> - BOOST_CHECK ( f . value ( ) ! = nullptr ) ; <nl> - BOOST_CHECK ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> - } <nl> - } <nl> - <nl> - / / remove actual keys <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( removed ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - BOOST_CHECK ( ! f . found ( ) ) ; <nl> - } <nl> - <nl> - manager . 
destroyCache ( cache ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test growth behavior ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - BOOST_AUTO_TEST_CASE ( tst_st_growth ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - uint64_t minimumSize = 64ULL * initialSize ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Plain , initialSize , true ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 4ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! success ) { <nl> - delete value ; <nl> + if ( f . found ( ) ) { <nl> + inserted + + ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> + } <nl> } <nl> - } <nl> - <nl> - BOOST_CHECK ( cache - > usage ( ) > minimumSize ) ; <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test shrink behavior ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - BOOST_AUTO_TEST_CASE ( tst_st_shrink ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Plain , initialSize , true ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 16ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! success ) { <nl> - delete value ; <nl> + / / test removal of bogus keys <nl> + for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> + bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( removed ) ; <nl> + / / ensure existing keys not removed <nl> + uint64_t found = 0 ; <nl> + for ( uint64_t j = 0 ; j < 1024 ; j + + ) { <nl> + auto f = cache - > find ( & j , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + found + + ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> + } <nl> + } <nl> + REQUIRE ( inserted = = found ) ; <nl> } <nl> <nl> - cache - > disableGrowth ( ) ; <nl> - uint64_t target = cache - > usage ( ) / 2 ; <nl> - while ( !
cache - > resize ( target ) ) { <nl> - } ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 16ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! success ) { <nl> - delete value ; <nl> + / / remove actual keys <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( removed ) ; <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( ! f . found ( ) ) ; <nl> } <nl> - } <nl> <nl> - while ( cache - > isResizing ( ) ) { <nl> + manager . destroyCache ( cache ) ; <nl> } <nl> - BOOST_CHECK_MESSAGE ( cache - > usage ( ) < = target , <nl> - cache - > usage ( ) < < " ! < = " < < target ) ; <nl> - <nl> - manager . destroyCache ( cache ) ; <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test mixed load behavior ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - BOOST_AUTO_TEST_CASE ( tst_mt_mixed_load ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - size_t threadCount = 4 ; <nl> - std : : shared_ptr < Cache > cache = <nl> - manager . createCache ( Manager : : CacheType : : Plain , initialSize , true ) ; <nl> - <nl> - uint64_t chunkSize = 16 * 1024 * 1024 ; <nl> - uint64_t initialInserts = 4 * 1024 * 1024 ; <nl> - uint64_t operationCount = 16 * 1024 * 1024 ; <nl> - std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> - std : : atomic < uint64_t > missCount ( 0 ) ; <nl> - auto worker = [ & manager , & cache , initialInserts , operationCount , & hitCount , <nl> - & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> - / / fill with some initial data <nl> - for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> - uint64_t item = lower + i ; <nl> - CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> - & item , sizeof ( uint64_t ) ) ; <nl> - bool ok = cache - > insert ( value ) ; <nl> - if ( ! ok ) { <nl> + SECTION ( " verify that cache can indeed grow when it runs out of space " ) { <nl> + uint64_t minimumUsage = 1024 * 1024 ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024 * 1024 * 1024 ) ; <nl> + auto cache = manager . createCache ( CacheType : : Plain ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 4 * 1024 * 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( ! 
success ) { <nl> delete value ; <nl> } <nl> } <nl> <nl> - / / initialize valid range for keys that * might * be in cache <nl> - uint64_t validLower = lower ; <nl> - uint64_t validUpper = lower + initialInserts - 1 ; <nl> - <nl> - / / commence mixed workload <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + CHECK ( cache - > usage ( ) > minimumUsage ) ; <nl> <nl> - if ( r > = 99 ) { / / remove something <nl> - if ( validLower = = validUpper ) { <nl> - continue ; / / removed too much <nl> - } <nl> - <nl> - uint64_t item = validLower + + ; <nl> - <nl> - cache - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> - } else if ( r > = 95 ) { / / insert something <nl> - if ( validUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> + manager . destroyCache ( cache ) ; <nl> + } <nl> <nl> - uint64_t item = + + validUpper ; <nl> + SECTION ( " test behavior under mixed load " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024 * 1024 * 1024 ) ; <nl> + size_t threadCount = 4 ; <nl> + std : : shared_ptr < Cache > cache = manager . createCache ( CacheType : : Plain ) ; <nl> + <nl> + uint64_t chunkSize = 16 * 1024 * 1024 ; <nl> + uint64_t initialInserts = 4 * 1024 * 1024 ; <nl> + uint64_t operationCount = 16 * 1024 * 1024 ; <nl> + std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> + std : : atomic < uint64_t > missCount ( 0 ) ; <nl> + auto worker = [ & manager , & cache , initialInserts , operationCount , & hitCount , <nl> + & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> + / / fill with some initial data <nl> + for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> + uint64_t item = lower + i ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> bool ok = cache - > insert ( value ) ; <nl> if ( ! ok ) { <nl> delete value ; <nl> } <nl> - } else { / / lookup something <nl> - uint64_t item = RandomGenerator : : interval ( <nl> - static_cast < int64_t > ( validLower ) , static_cast < int64_t > ( validUpper ) ) ; <nl> + } <nl> <nl> - Cache : : Finding f = cache - > find ( & item , sizeof ( uint64_t ) ) ; <nl> - if ( f . found ( ) ) { <nl> - hitCount + + ; <nl> - TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> - TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> - } else { <nl> - missCount + + ; <nl> - TRI_ASSERT ( f . 
value ( ) = = nullptr ) ; <nl> + / / initialize valid range for keys that * might * be in cache <nl> + uint64_t validLower = lower ; <nl> + uint64_t validUpper = lower + initialInserts - 1 ; <nl> + <nl> + / / commence mixed workload <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + <nl> + if ( r > = 99 ) { / / remove something <nl> + if ( validLower = = validUpper ) { <nl> + continue ; / / removed too much <nl> + } <nl> + <nl> + uint64_t item = validLower + + ; <nl> + <nl> + cache - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> + } else if ( r > = 95 ) { / / insert something <nl> + if ( validUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + validUpper ; <nl> + CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> + & item , sizeof ( uint64_t ) ) ; <nl> + bool ok = cache - > insert ( value ) ; <nl> + if ( ! ok ) { <nl> + delete value ; <nl> + } <nl> + } else { / / lookup something <nl> + uint64_t item = <nl> + RandomGenerator : : interval ( static_cast < int64_t > ( validLower ) , <nl> + static_cast < int64_t > ( validUpper ) ) ; <nl> + <nl> + Cache : : Finding f = cache - > find ( & item , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + hitCount + + ; <nl> + TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> + TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + missCount + + ; <nl> + TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> + } <nl> } <nl> } <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + uint64_t lower = i * chunkSize ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> + threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> } <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - uint64_t lower = i * chunkSize ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> - threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> - } <nl> - <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test statistics ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - BOOST_AUTO_TEST_CASE ( tst_st_stats ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cacheMiss = <nl> - manager . createCache ( Manager : : CacheType : : Plain , cacheLimit , false , true ) ; <nl> - auto cacheHit = <nl> - manager . createCache ( Manager : : CacheType : : Plain , cacheLimit , false , true ) ; <nl> - auto cacheMixed = <nl> - manager . 
createCache ( Manager : : CacheType : : Plain , cacheLimit , false , true ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cacheHit - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> - value = CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - success = cacheMiss - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> - value = CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - success = cacheMixed - > insert ( value ) ; <nl> - BOOST_CHECK ( success ) ; <nl> + manager . destroyCache ( cache ) ; <nl> + RandomGenerator : : shutdown ( ) ; <nl> } <nl> <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - auto f = cacheHit - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - } <nl> - { <nl> - auto cacheStats = cacheHit - > hitRates ( ) ; <nl> - auto managerStats = manager . globalHitRates ( ) ; <nl> - BOOST_CHECK_EQUAL ( cacheStats . first , 100 . 0 ) ; <nl> - BOOST_CHECK_EQUAL ( cacheStats . second , 100 . 0 ) ; <nl> - BOOST_CHECK_EQUAL ( managerStats . first , 100 . 0 ) ; <nl> - BOOST_CHECK_EQUAL ( managerStats . second , 100 . 0 ) ; <nl> - } <nl> + SECTION ( " test hit rate statistics reporting " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cacheMiss = manager . createCache ( CacheType : : Plain , true , cacheLimit ) ; <nl> + auto cacheHit = manager . createCache ( CacheType : : Plain , true , cacheLimit ) ; <nl> + auto cacheMixed = manager . createCache ( CacheType : : Plain , true , cacheLimit ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + if ( ! cacheHit - > insert ( value ) ) { <nl> + delete value ; <nl> + } <nl> <nl> - for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> - auto f = cacheMiss - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - } <nl> - { <nl> - auto cacheStats = cacheMiss - > hitRates ( ) ; <nl> - auto managerStats = manager . globalHitRates ( ) ; <nl> - BOOST_CHECK_EQUAL ( cacheStats . first , 0 . 0 ) ; <nl> - BOOST_CHECK_EQUAL ( cacheStats . second , 0 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . first > 49 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . first < 51 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . second > 49 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . second < 51 . 0 ) ; <nl> - } <nl> + value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + if ( ! cacheMiss - > insert ( value ) ) { <nl> + delete value ; <nl> + } <nl> <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - auto f = cacheMixed - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - } <nl> - for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> - auto f = cacheMixed - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - } <nl> - { <nl> - auto cacheStats = cacheMixed - > hitRates ( ) ; <nl> - auto managerStats = manager . globalHitRates ( ) ; <nl> - BOOST_CHECK ( cacheStats . first > 49 . 0 ) ; <nl> - BOOST_CHECK ( cacheStats . first < 51 . 0 ) ; <nl> - BOOST_CHECK ( cacheStats . second > 49 . 0 ) ; <nl> - BOOST_CHECK ( cacheStats . second < 51 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . first > 49 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . first < 51 . 0 ) ; <nl> - BOOST_CHECK ( managerStats . second > 49 . 
0 ) ; <nl> - BOOST_CHECK ( managerStats . second < 51 . 0 ) ; <nl> - } <nl> + value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + if ( ! cacheMixed - > insert ( value ) ) { <nl> + delete value ; <nl> + } <nl> + } <nl> <nl> - manager . destroyCache ( cacheHit ) ; <nl> - manager . destroyCache ( cacheMiss ) ; <nl> - manager . destroyCache ( cacheMixed ) ; <nl> - } <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + auto f = cacheHit - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + } <nl> + { <nl> + auto cacheStats = cacheHit - > hitRates ( ) ; <nl> + auto managerStats = manager . globalHitRates ( ) ; <nl> + REQUIRE ( cacheStats . first > = 85 . 0 ) ; <nl> + REQUIRE ( cacheStats . second > = 85 . 0 ) ; <nl> + REQUIRE ( managerStats . first > = 85 . 0 ) ; <nl> + REQUIRE ( managerStats . second > = 85 . 0 ) ; <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> + auto f = cacheMiss - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + } <nl> + { <nl> + auto cacheStats = cacheMiss - > hitRates ( ) ; <nl> + auto managerStats = manager . globalHitRates ( ) ; <nl> + REQUIRE ( cacheStats . first = = 0 . 0 ) ; <nl> + REQUIRE ( cacheStats . second = = 0 . 0 ) ; <nl> + REQUIRE ( managerStats . first > 40 . 0 ) ; <nl> + REQUIRE ( managerStats . first < 50 . 0 ) ; <nl> + REQUIRE ( managerStats . second > 40 . 0 ) ; <nl> + REQUIRE ( managerStats . second < 50 . 0 ) ; <nl> + } <nl> <nl> - BOOST_AUTO_TEST_SUITE_END ( ) <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + auto f = cacheMixed - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + } <nl> + for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> + auto f = cacheMixed - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + } <nl> + { <nl> + auto cacheStats = cacheMixed - > hitRates ( ) ; <nl> + auto managerStats = manager . globalHitRates ( ) ; <nl> + REQUIRE ( cacheStats . first > 40 . 0 ) ; <nl> + REQUIRE ( cacheStats . first < 50 . 0 ) ; <nl> + REQUIRE ( cacheStats . second > 40 . 0 ) ; <nl> + REQUIRE ( cacheStats . second < 50 . 0 ) ; <nl> + REQUIRE ( managerStats . first > 40 . 0 ) ; <nl> + REQUIRE ( managerStats . first < 50 . 0 ) ; <nl> + REQUIRE ( managerStats . second > 40 . 0 ) ; <nl> + REQUIRE ( managerStats . second < 50 . 0 ) ; <nl> + } <nl> <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> + manager . destroyCache ( cacheHit ) ; <nl> + manager . destroyCache ( cacheMiss ) ; <nl> + manager . destroyCache ( cacheMixed ) ; <nl> + } <nl> + } <nl> mmm a / tests / Cache / Rebalancer . cpp <nl> ppp b / tests / Cache / Rebalancer . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . 
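A note on the insert/ownership convention used throughout the converted PlainCache tests above: CachedValue::construct heap-allocates the entry, Cache::insert takes ownership only when it returns true, and on failure the caller must free the value itself, which is why every rewritten section pairs a failed insert with delete value. Below is a minimal sketch of that pattern, assuming the arangodb::cache headers from this diff (Cache, CachedValue) are available; the helper name insertOrDiscard is ours, not from the source:

// sketch: insert a copy of `key` into `cache`, honoring the ownership rule
// that a failed insert leaves the caller responsible for the allocation
#include <cstdint>
#include <memory>

static bool insertOrDiscard(std::shared_ptr<arangodb::cache::Cache> const& cache,
                            uint64_t key) {
  using arangodb::cache::CachedValue;
  // construct() copies the key and payload bytes into a new heap object
  CachedValue* value =
      CachedValue::construct(&key, sizeof(uint64_t), &key, sizeof(uint64_t));
  bool ok = cache->insert(value);
  if (!ok) {
    delete value;  // the cache did not take ownership
  }
  return ok;
}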
<nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / Rebalancer . h " <nl> # include " Basics / Common . h " <nl> - # include " Random / RandomGenerator . h " <nl> - <nl> - # include " catch . hpp " <nl> - <nl> + # include " Cache / Common . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / PlainCache . h " <nl> - # include " Cache / Rebalancer . h " <nl> # include " Cache / Transaction . h " <nl> # include " Cache / TransactionalCache . h " <nl> + # include " Random / RandomGenerator . h " <nl> <nl> # include " MockScheduler . h " <nl> + # include " catch . hpp " <nl> <nl> # include < stdint . h > <nl> # include < queue > <nl> <nl> using namespace arangodb ; <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheRebalancerTest " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test rebalancing plain caches ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_rebalancing_plain " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 128ULL * 1024ULL * 1024ULL ) ; <nl> - Rebalancer rebalancer ( & manager ) ; <nl> - <nl> - size_t cacheCount = 4 ; <nl> - size_t threadCount = 4 ; <nl> - std : : vector < std : : shared_ptr < Cache > > caches ; <nl> - for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> - caches . emplace_back ( <nl> - manager . createCache ( Manager : : CacheType : : Plain , initialSize , true ) ) ; <nl> - } <nl> - <nl> - bool doneRebalancing = false ; <nl> - auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> - while ( ! doneRebalancing ) { <nl> - bool rebalanced = rebalancer . rebalance ( ) ; <nl> - if ( rebalanced ) { <nl> - usleep ( 500 * 1000 ) ; <nl> - } else { <nl> - usleep ( 100 ) ; <nl> - } <nl> + TEST_CASE ( " cache : : Rebalancer " , " [ cache ] [ ! 
hide ] [ longRunning ] " ) { <nl> + SECTION ( " test rebalancing with PlainCache " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 128 * 1024 * 1024 ) ; <nl> + Rebalancer rebalancer ( & manager ) ; <nl> + <nl> + size_t cacheCount = 4 ; <nl> + size_t threadCount = 4 ; <nl> + std : : vector < std : : shared_ptr < Cache > > caches ; <nl> + for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> + caches . emplace_back ( manager . createCache ( CacheType : : Plain ) ) ; <nl> } <nl> - } ; <nl> - auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> - <nl> - uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> - uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> - uint64_t operationCount = 4 * 1024 * 1024 ; <nl> - std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> - std : : atomic < uint64_t > missCount ( 0 ) ; <nl> - auto worker = [ & manager , & caches , cacheCount , initialInserts , operationCount , <nl> - & hitCount , <nl> - & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> - / / fill with some initial data <nl> - for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> - uint64_t item = lower + i ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> - & item , sizeof ( uint64_t ) ) ; <nl> - bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> - if ( ! ok ) { <nl> - delete value ; <nl> - } <nl> - } <nl> - <nl> - / / initialize valid range for keys that * might * be in cache <nl> - uint64_t validLower = lower ; <nl> - uint64_t validUpper = lower + initialInserts - 1 ; <nl> <nl> - / / commence mixed workload <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> - <nl> - if ( r > = 99 ) { / / remove something <nl> - if ( validLower = = validUpper ) { <nl> - continue ; / / removed too much <nl> - } <nl> - <nl> - uint64_t item = validLower + + ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - <nl> - caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> - } else if ( r > = 95 ) { / / insert something <nl> - if ( validUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> + bool doneRebalancing = false ; <nl> + auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> + while ( ! doneRebalancing ) { <nl> + bool rebalanced = rebalancer . 
rebalance ( ) ; <nl> + if ( rebalanced ) { <nl> + usleep ( 500 * 1000 ) ; <nl> + } else { <nl> + usleep ( 100 ) ; <nl> } <nl> - <nl> - uint64_t item = + + validUpper ; <nl> + } <nl> + } ; <nl> + auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> + <nl> + uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> + uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> + uint64_t operationCount = 4 * 1024 * 1024 ; <nl> + std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> + std : : atomic < uint64_t > missCount ( 0 ) ; <nl> + auto worker = [ & manager , & caches , cacheCount , initialInserts , <nl> + operationCount , & hitCount , <nl> + & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> + / / fill with some initial data <nl> + for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> + uint64_t item = lower + i ; <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> SECTION ( " tst_rebalancing_plain " ) { <nl> if ( ! ok ) { <nl> delete value ; <nl> } <nl> - } else { / / lookup something <nl> - uint64_t item = RandomGenerator : : interval ( <nl> - static_cast < int64_t > ( validLower ) , static_cast < int64_t > ( validUpper ) ) ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> + } <nl> <nl> - Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> - if ( f . found ( ) ) { <nl> - hitCount + + ; <nl> - TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> - TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> - } else { <nl> - missCount + + ; <nl> - TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> + / / initialize valid range for keys that * might * be in cache <nl> + uint64_t validLower = lower ; <nl> + uint64_t validUpper = lower + initialInserts - 1 ; <nl> + <nl> + / / commence mixed workload <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + <nl> + if ( r > = 99 ) { / / remove something <nl> + if ( validLower = = validUpper ) { <nl> + continue ; / / removed too much <nl> + } <nl> + <nl> + uint64_t item = validLower + + ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + <nl> + caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> + } else if ( r > = 95 ) { / / insert something <nl> + if ( validUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + validUpper ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> + & item , sizeof ( uint64_t ) ) ; <nl> + bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> + if ( ! ok ) { <nl> + delete value ; <nl> + } <nl> + } else { / / lookup something <nl> + uint64_t item = <nl> + RandomGenerator : : interval ( static_cast < int64_t > ( validLower ) , <nl> + static_cast < int64_t > ( validUpper ) ) ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + <nl> + Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + hitCount + + ; <nl> + TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> + TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + missCount + + ; <nl> + TRI_ASSERT ( f . 
value ( ) = = nullptr ) ; <nl> + } <nl> } <nl> } <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + uint64_t lower = i * chunkSize ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> + threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> } <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - uint64_t lower = i * chunkSize ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> - threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> - } <nl> - <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> - <nl> - doneRebalancing = true ; <nl> - rebalancerThread - > join ( ) ; <nl> - delete rebalancerThread ; <nl> <nl> - for ( auto cache : caches ) { <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> + doneRebalancing = true ; <nl> + rebalancerThread - > join ( ) ; <nl> + delete rebalancerThread ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test rebalancing transactional caches ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( auto cache : caches ) { <nl> + manager . destroyCache ( cache ) ; <nl> + } <nl> <nl> - SECTION ( " tst_rebalancing_transactional " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 128ULL * 1024ULL * 1024ULL ) ; <nl> - Rebalancer rebalancer ( & manager ) ; <nl> - <nl> - size_t cacheCount = 4 ; <nl> - size_t threadCount = 4 ; <nl> - std : : vector < std : : shared_ptr < Cache > > caches ; <nl> - for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> - caches . emplace_back ( manager . createCache ( Manager : : CacheType : : Transactional , <nl> - initialSize , true ) ) ; <nl> + RandomGenerator : : shutdown ( ) ; <nl> } <nl> <nl> - bool doneRebalancing = false ; <nl> - auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> - while ( ! doneRebalancing ) { <nl> - bool rebalanced = rebalancer . rebalance ( ) ; <nl> - if ( rebalanced ) { <nl> - usleep ( 500 * 1000 ) ; <nl> - } else { <nl> - usleep ( 100 ) ; <nl> - } <nl> + SECTION ( " test rebalancing with TransactionalCache " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 128 * 1024 * 1024 ) ; <nl> + Rebalancer rebalancer ( & manager ) ; <nl> + <nl> + size_t cacheCount = 4 ; <nl> + size_t threadCount = 4 ; <nl> + std : : vector < std : : shared_ptr < Cache > > caches ; <nl> + for ( size_t i = 0 ; i < cacheCount ; i + + ) { <nl> + caches . emplace_back ( manager . 
createCache ( CacheType : : Transactional ) ) ; <nl> } <nl> - } ; <nl> - auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> - <nl> - uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> - uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> - uint64_t operationCount = 4 * 1024 * 1024 ; <nl> - std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> - std : : atomic < uint64_t > missCount ( 0 ) ; <nl> - auto worker = [ & manager , & caches , cacheCount , initialInserts , operationCount , <nl> - & hitCount , <nl> - & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> - Transaction * tx = manager . beginTransaction ( false ) ; <nl> - / / fill with some initial data <nl> - for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> - uint64_t item = lower + i ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> - & item , sizeof ( uint64_t ) ) ; <nl> - bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> - if ( ! ok ) { <nl> - delete value ; <nl> - } <nl> - } <nl> - <nl> - / / initialize valid range for keys that * might * be in cache <nl> - uint64_t validLower = lower ; <nl> - uint64_t validUpper = lower + initialInserts - 1 ; <nl> - uint64_t blacklistUpper = validUpper ; <nl> <nl> - / / commence mixed workload <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> - <nl> - if ( r > = 99 ) { / / remove something <nl> - if ( validLower = = validUpper ) { <nl> - continue ; / / removed too much <nl> - } <nl> - <nl> - uint64_t item = validLower + + ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - <nl> - caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> - } else if ( r > = 90 ) { / / insert something <nl> - if ( validUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> - <nl> - uint64_t item = + + validUpper ; <nl> - if ( validUpper > blacklistUpper ) { <nl> - blacklistUpper = validUpper ; <nl> + bool doneRebalancing = false ; <nl> + auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> + while ( ! doneRebalancing ) { <nl> + bool rebalanced = rebalancer . rebalance ( ) ; <nl> + if ( rebalanced ) { <nl> + usleep ( 500 * 1000 ) ; <nl> + } else { <nl> + usleep ( 100 ) ; <nl> } <nl> + } <nl> + } ; <nl> + auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> + <nl> + uint64_t chunkSize = 4 * 1024 * 1024 ; <nl> + uint64_t initialInserts = 1 * 1024 * 1024 ; <nl> + uint64_t operationCount = 4 * 1024 * 1024 ; <nl> + std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> + std : : atomic < uint64_t > missCount ( 0 ) ; <nl> + auto worker = [ & manager , & caches , cacheCount , initialInserts , <nl> + operationCount , & hitCount , <nl> + & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> + Transaction * tx = manager . beginTransaction ( false ) ; <nl> + / / fill with some initial data <nl> + for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> + uint64_t item = lower + i ; <nl> size_t cacheIndex = item % cacheCount ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> SECTION ( " tst_rebalancing_transactional " ) { <nl> if ( ! 
ok ) { <nl> delete value ; <nl> } <nl> - } else if ( r > = 80 ) { / / blacklist something <nl> - if ( blacklistUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> - <nl> - uint64_t item = + + blacklistUpper ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> - caches [ cacheIndex ] - > blacklist ( & item , sizeof ( uint64_t ) ) ; <nl> - } else { / / lookup something <nl> - uint64_t item = RandomGenerator : : interval ( <nl> - static_cast < int64_t > ( validLower ) , static_cast < int64_t > ( validUpper ) ) ; <nl> - size_t cacheIndex = item % cacheCount ; <nl> + } <nl> <nl> - Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> - if ( f . found ( ) ) { <nl> - hitCount + + ; <nl> - TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> - TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> - } else { <nl> - missCount + + ; <nl> - TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> + / / initialize valid range for keys that * might * be in cache <nl> + uint64_t validLower = lower ; <nl> + uint64_t validUpper = lower + initialInserts - 1 ; <nl> + uint64_t blacklistUpper = validUpper ; <nl> + <nl> + / / commence mixed workload <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + <nl> + if ( r > = 99 ) { / / remove something <nl> + if ( validLower = = validUpper ) { <nl> + continue ; / / removed too much <nl> + } <nl> + <nl> + uint64_t item = validLower + + ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + <nl> + caches [ cacheIndex ] - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> + } else if ( r > = 90 ) { / / insert something <nl> + if ( validUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + validUpper ; <nl> + if ( validUpper > blacklistUpper ) { <nl> + blacklistUpper = validUpper ; <nl> + } <nl> + size_t cacheIndex = item % cacheCount ; <nl> + CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> + & item , sizeof ( uint64_t ) ) ; <nl> + bool ok = caches [ cacheIndex ] - > insert ( value ) ; <nl> + if ( ! ok ) { <nl> + delete value ; <nl> + } <nl> + } else if ( r > = 80 ) { / / blacklist something <nl> + if ( blacklistUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + blacklistUpper ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + caches [ cacheIndex ] - > blacklist ( & item , sizeof ( uint64_t ) ) ; <nl> + } else { / / lookup something <nl> + uint64_t item = <nl> + RandomGenerator : : interval ( static_cast < int64_t > ( validLower ) , <nl> + static_cast < int64_t > ( validUpper ) ) ; <nl> + size_t cacheIndex = item % cacheCount ; <nl> + <nl> + Cache : : Finding f = caches [ cacheIndex ] - > find ( & item , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + hitCount + + ; <nl> + TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> + TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + missCount + + ; <nl> + TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> + } <nl> } <nl> } <nl> + manager . endTransaction ( tx ) ; <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + uint64_t lower = i * chunkSize ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> + threads . 
push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> } <nl> - manager . endTransaction ( tx ) ; <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - uint64_t lower = i * chunkSize ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> - threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> - } <nl> - <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> <nl> - doneRebalancing = true ; <nl> - rebalancerThread - > join ( ) ; <nl> - delete rebalancerThread ; <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - for ( auto cache : caches ) { <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + doneRebalancing = true ; <nl> + rebalancerThread - > join ( ) ; <nl> + delete rebalancerThread ; <nl> <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( auto cache : caches ) { <nl> + manager . destroyCache ( cache ) ; <nl> + } <nl> <nl> + RandomGenerator : : shutdown ( ) ; <nl> + } <nl> } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> deleted file mode 100644 <nl> index b742ba38849 . . 00000000000 <nl> mmm a / tests / Cache / Runner . cpp <nl> ppp / dev / null <nl> <nl> - # define BOOST_TEST_MODULE " C / C + + Unit Tests for ArangoDB Cache " <nl> - # include < boost / test / included / unit_test . hpp > <nl> mmm a / tests / Cache / State . cpp <nl> ppp b / tests / Cache / State . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / State . h " <nl> # include " Basics / Common . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / State . h " <nl> - <nl> # include < stdint . 
h > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheStateTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test lock methods <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_lock " ) { <nl> - State state ; <nl> - bool success ; <nl> - <nl> - uint32_t outsideState = 0 ; <nl> - <nl> - auto cb1 = [ & outsideState ] ( ) - > void { outsideState = 1 ; } ; <nl> - <nl> - auto cb2 = [ & outsideState ] ( ) - > void { outsideState = 2 ; } ; <nl> - <nl> - / / check lock without contention <nl> - CHECK ( ! state . isLocked ( ) ) ; <nl> - success = state . lock ( - 1 , cb1 ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( state . isLocked ( ) ) ; <nl> - CHECK ( 1UL = = outsideState ) ; <nl> - <nl> - / / check lock with contention <nl> - success = state . lock ( 10LL , cb2 ) ; <nl> - CHECK ( ! success ) ; <nl> - CHECK ( state . isLocked ( ) ) ; <nl> - CHECK ( 1UL = = outsideState ) ; <nl> - <nl> - / / check unlock <nl> - state . unlock ( ) ; <nl> - CHECK ( ! state . isLocked ( ) ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test methods for non - lock flags <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_flags " ) { <nl> - State state ; <nl> - bool success ; <nl> - <nl> - success = state . lock ( ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . unlock ( ) ; <nl> - <nl> - success = state . lock ( ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . toggleFlag ( State : : Flag : : migrated ) ; <nl> - CHECK ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . unlock ( ) ; <nl> - <nl> - success = state . lock ( ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . unlock ( ) ; <nl> - <nl> - success = state . lock ( ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . toggleFlag ( State : : Flag : : migrated ) ; <nl> - CHECK ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . unlock ( ) ; <nl> - <nl> - success = state . lock ( ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> - state . 
unlock ( ) ; <nl> + TEST_CASE ( " cache : : State " , " [ cache ] " ) { <nl> + SECTION ( " test lock methods " ) { <nl> + State state ; <nl> + bool success ; <nl> + <nl> + uint32_t outsideState = 0 ; <nl> + <nl> + auto cb1 = [ & outsideState ] ( ) - > void { outsideState = 1 ; } ; <nl> + <nl> + auto cb2 = [ & outsideState ] ( ) - > void { outsideState = 2 ; } ; <nl> + <nl> + / / check lock without contention <nl> + REQUIRE ( ! state . isLocked ( ) ) ; <nl> + success = state . lock ( - 1 , cb1 ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( state . isLocked ( ) ) ; <nl> + REQUIRE ( 1UL = = outsideState ) ; <nl> + <nl> + / / check lock with contention <nl> + success = state . lock ( 10LL , cb2 ) ; <nl> + REQUIRE ( ! success ) ; <nl> + REQUIRE ( state . isLocked ( ) ) ; <nl> + REQUIRE ( 1UL = = outsideState ) ; <nl> + <nl> + / / check unlock <nl> + state . unlock ( ) ; <nl> + REQUIRE ( ! state . isLocked ( ) ) ; <nl> + } <nl> + <nl> + SECTION ( " test methods for non - lock flags " ) { <nl> + State state ; <nl> + bool success ; <nl> + <nl> + success = state . lock ( ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . unlock ( ) ; <nl> + <nl> + success = state . lock ( ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . toggleFlag ( State : : Flag : : migrated ) ; <nl> + REQUIRE ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . unlock ( ) ; <nl> + <nl> + success = state . lock ( ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . unlock ( ) ; <nl> + <nl> + success = state . lock ( ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . toggleFlag ( State : : Flag : : migrated ) ; <nl> + REQUIRE ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . unlock ( ) ; <nl> + <nl> + success = state . lock ( ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( ! state . isSet ( State : : Flag : : migrated ) ) ; <nl> + state . unlock ( ) ; <nl> + } <nl> } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> new file mode 100644 <nl> index 00000000000 . . 03316681753 <nl> mmm / dev / null <nl> ppp b / tests / Cache / Table . cpp <nl> <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / / @ brief test suite for arangodb : : cache : : Table <nl> + / / / <nl> + / / / @ file <nl> + / / / <nl> + / / / DISCLAIMER <nl> + / / / <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> + / / / you may not use this file except in compliance with the License .
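The new Table tests that follow repeatedly build hashes as index << (32 - logSize): a table of 2^logSize buckets is addressed by the top logSize bits of the 32-bit hash, which is also why a table two levels larger maps the same hash to indexSmall << 2 in the migration sections. A small sketch of that mapping; bucketIndex is a hypothetical helper for illustration, not part of the source:

// sketch: top-bits bucket addressing assumed by the fetch/migration tests
#include <cstdint>

static uint64_t bucketIndex(uint32_t hash, uint32_t logSize) {
  // keep only the most-significant logSize bits of the hash
  return static_cast<uint64_t>(hash) >> (32 - logSize);
}

// e.g. with hash = index << (32 - logSize), as the tests construct it:
//   bucketIndex(hash, logSize)     == index
//   bucketIndex(hash, logSize + 2) == index << 2   (first of 4 sub-buckets)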
<nl> + / / / You may obtain a copy of the License at <nl> + / / / <nl> + / / / http : / / www . apache . org / licenses / LICENSE - 2 . 0 <nl> + / / / <nl> + / / / Unless required by applicable law or agreed to in writing , software <nl> + / / / distributed under the License is distributed on an " AS IS " BASIS , <nl> + / / / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied . <nl> + / / / See the License for the specific language governing permissions and <nl> + / / / limitations under the License . <nl> + / / / <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> + / / / <nl> + / / / @ author Daniel H . Larkin <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> + / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + <nl> + # include " Cache / Table . h " <nl> + # include " Basics / Common . h " <nl> + # include " Cache / Common . h " <nl> + # include " Cache / PlainBucket . h " <nl> + <nl> + # include " catch . hpp " <nl> + <nl> + # include < stdint . h > <nl> + # include < memory > <nl> + <nl> + using namespace arangodb : : cache ; <nl> + <nl> + TEST_CASE ( " cache : : Table " , " [ cache ] " ) { <nl> + SECTION ( " test static allocation size method " ) { <nl> + for ( uint32_t i = Table : : minLogSize ; i < = Table : : maxLogSize ; i + + ) { <nl> + REQUIRE ( Table : : allocationSize ( i ) = = ( sizeof ( Table ) + ( BUCKET_SIZE < < i ) ) ) ; <nl> + } <nl> + } <nl> + <nl> + SECTION ( " test basic constructor behavior " ) { <nl> + for ( uint32_t i = Table : : minLogSize ; i < = 20 ; i + + ) { <nl> + auto table = std : : make_shared < Table > ( i ) ; <nl> + REQUIRE ( table . get ( ) ! = nullptr ) ; <nl> + REQUIRE ( table - > memoryUsage ( ) = = ( sizeof ( Table ) + ( BUCKET_SIZE < < i ) ) ) ; <nl> + REQUIRE ( table - > logSize ( ) = = i ) ; <nl> + REQUIRE ( table - > size ( ) = = ( static_cast < uint64_t > ( 1 ) < < i ) ) ; <nl> + } <nl> + } <nl> + <nl> + SECTION ( " test basic bucket - fetching behavior " ) { <nl> + auto table = std : : make_shared < Table > ( Table : : minLogSize ) ; <nl> + REQUIRE ( table . get ( ) ! = nullptr ) ; <nl> + table - > enable ( ) ; <nl> + for ( uint64_t i = 0 ; i < table - > size ( ) ; i + + ) { <nl> + uint32_t hash = static_cast < uint32_t > ( i < < ( 32 - Table : : minLogSize ) ) ; <nl> + auto pair = table - > fetchAndLockBucket ( hash , - 1 ) ; <nl> + auto bucket = reinterpret_cast < PlainBucket * > ( pair . first ) ; <nl> + auto source = pair . second ; <nl> + REQUIRE ( bucket ! = nullptr ) ; <nl> + REQUIRE ( bucket - > isLocked ( ) ) ; <nl> + REQUIRE ( source . get ( ) ! = nullptr ) ; <nl> + REQUIRE ( source = = table ) ; <nl> + <nl> + auto rawBucket = reinterpret_cast < PlainBucket * > ( table - > primaryBucket ( i ) ) ; <nl> + REQUIRE ( bucket = = rawBucket ) ; <nl> + <nl> + auto badPair = table - > fetchAndLockBucket ( hash , 10 ) ; <nl> + auto badBucket = reinterpret_cast < PlainBucket * > ( badPair . first ) ; <nl> + auto badSource = badPair . second ; <nl> + REQUIRE ( badBucket = = nullptr ) ; <nl> + REQUIRE ( badSource . 
get ( ) = = nullptr ) ; <nl> + <nl> + bucket - > unlock ( ) ; <nl> + } <nl> + } <nl> + <nl> + SECTION ( " make sure migration functions work as intended " ) { <nl> + auto small = std : : make_shared < Table > ( Table : : minLogSize ) ; <nl> + auto large = std : : make_shared < Table > ( Table : : minLogSize + 2 ) ; <nl> + auto huge = std : : make_shared < Table > ( Table : : minLogSize + 4 ) ; <nl> + small - > enable ( ) ; <nl> + large - > enable ( ) ; <nl> + huge - > enable ( ) ; <nl> + <nl> + SECTION ( " check that setAuxiliary works as intended " ) { <nl> + auto res = small - > setAuxiliary ( large ) ; <nl> + REQUIRE ( res . get ( ) = = nullptr ) ; <nl> + res = small - > setAuxiliary ( huge ) ; <nl> + REQUIRE ( res . get ( ) = = huge . get ( ) ) ; <nl> + res = small - > setAuxiliary ( std : : shared_ptr < Table > ( nullptr ) ) ; <nl> + REQUIRE ( res . get ( ) = = large . get ( ) ) ; <nl> + } <nl> + <nl> + SECTION ( " check that bucket locking falls through appropriately " ) { <nl> + auto res = small - > setAuxiliary ( large ) ; <nl> + REQUIRE ( res . get ( ) = = nullptr ) ; <nl> + <nl> + uint64_t indexSmall = 17 ; / / picked something at " random " <nl> + uint64_t indexLarge = indexSmall < < 2 ; <nl> + uint32_t hash = <nl> + static_cast < uint32_t > ( indexSmall < < ( 32 - small - > logSize ( ) ) ) ; <nl> + <nl> + auto pair = small - > fetchAndLockBucket ( hash , - 1 ) ; <nl> + auto bucket = reinterpret_cast < PlainBucket * > ( pair . first ) ; <nl> + auto source = pair . second ; <nl> + REQUIRE ( bucket = = <nl> + reinterpret_cast < PlainBucket * > ( small - > primaryBucket ( indexSmall ) ) ) ; <nl> + bucket - > _state . toggleFlag ( State : : Flag : : migrated ) ; <nl> + bucket - > unlock ( ) ; <nl> + REQUIRE ( source = = small ) ; <nl> + <nl> + pair = small - > fetchAndLockBucket ( hash , - 1 ) ; <nl> + bucket = reinterpret_cast < PlainBucket * > ( pair . first ) ; <nl> + source = pair . second ; <nl> + REQUIRE ( bucket = = <nl> + reinterpret_cast < PlainBucket * > ( large - > primaryBucket ( indexLarge ) ) ) ; <nl> + REQUIRE ( source = = large ) ; <nl> + pair = small - > fetchAndLockBucket ( hash , 10 ) ; <nl> + REQUIRE ( pair . first = = nullptr ) ; <nl> + REQUIRE ( pair . second . get ( ) = = nullptr ) ; <nl> + bucket - > unlock ( ) ; <nl> + } <nl> + <nl> + SECTION ( " check subtable fetching for moving to a smaller table " ) { <nl> + auto res = large - > setAuxiliary ( small ) ; <nl> + REQUIRE ( res . get ( ) = = nullptr ) ; <nl> + <nl> + uint64_t indexLarge = 822 ; / / picked something at " random " <nl> + uint64_t indexSmall = indexLarge > > 2 ; <nl> + uint32_t hash = <nl> + static_cast < uint32_t > ( indexLarge < < ( 32 - large - > logSize ( ) ) ) ; <nl> + <nl> + auto subtable = large - > auxiliaryBuckets ( indexLarge ) ; <nl> + REQUIRE ( subtable . get ( ) ! = nullptr ) ; <nl> + auto bucket = subtable - > fetchBucket ( hash ) ; <nl> + REQUIRE ( bucket = = small - > primaryBucket ( indexSmall ) ) ; <nl> + } <nl> + <nl> + SECTION ( " check subtable fetching for moving to a larger table " ) { <nl> + auto res = small - > setAuxiliary ( large ) ; <nl> + REQUIRE ( res . get ( ) = = nullptr ) ; <nl> + <nl> + uint64_t indexSmall = 217 ; / / picked something at " random " <nl> + uint64_t indexLargeBase = indexSmall < < 2 ; <nl> + <nl> + auto subtable = small - > auxiliaryBuckets ( indexSmall ) ; <nl> + REQUIRE ( subtable . get ( ) ! 
= nullptr ) ; <nl> + for ( uint32_t i = 0 ; i < 4 ; i + + ) { <nl> + uint32_t indexLarge = indexLargeBase + i ; <nl> + uint32_t hash = indexLarge < < ( 32 - large - > logSize ( ) ) ; <nl> + REQUIRE ( subtable - > fetchBucket ( hash ) = = <nl> + large - > primaryBucket ( indexLarge ) ) ; <nl> + } <nl> + } <nl> + <nl> + SECTION ( " check that applyToAllBuckets works on subtables " ) { <nl> + auto res = small - > setAuxiliary ( large ) ; <nl> + REQUIRE ( res . get ( ) = = nullptr ) ; <nl> + <nl> + uint64_t indexSmall = 172 ; / / picked something at " random " <nl> + uint64_t indexLargeBase = indexSmall < < 2 ; <nl> + <nl> + auto subtable = small - > auxiliaryBuckets ( indexSmall ) ; <nl> + REQUIRE ( subtable . get ( ) ! = nullptr ) ; <nl> + subtable - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + PlainBucket * bucket = reinterpret_cast < PlainBucket * > ( ptr ) ; <nl> + return bucket - > lock ( - 1 ) ; <nl> + } ) ; <nl> + for ( uint32_t i = 0 ; i < 4 ; i + + ) { <nl> + uint32_t indexLarge = indexLargeBase + i ; <nl> + uint32_t hash = indexLarge < < ( 32 - large - > logSize ( ) ) ; <nl> + auto bucket = <nl> + reinterpret_cast < PlainBucket * > ( subtable - > fetchBucket ( hash ) ) ; <nl> + REQUIRE ( bucket - > isLocked ( ) ) ; <nl> + } <nl> + subtable - > applyToAllBuckets ( [ ] ( void * ptr ) - > bool { <nl> + PlainBucket * bucket = reinterpret_cast < PlainBucket * > ( ptr ) ; <nl> + bucket - > unlock ( ) ; <nl> + return true ; <nl> + } ) ; <nl> + } <nl> + <nl> + SECTION ( " test fill ratio methods " ) { <nl> + for ( uint64_t i = 0 ; i < large - > size ( ) ; i + + ) { <nl> + bool res = large - > slotFilled ( ) ; <nl> + if ( ( i + 1 ) < <nl> + static_cast < uint64_t > ( 0 . 125 * static_cast < double > ( large - > size ( ) ) ) ) { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) - 1 ) ; <nl> + REQUIRE ( res = = false ) ; <nl> + } else if ( ( i + 1 ) > static_cast < uint64_t > ( <nl> + 0 . 75 * static_cast < double > ( large - > size ( ) ) ) ) { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) + 1 ) ; <nl> + REQUIRE ( res = = true ) ; <nl> + } else { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) ) ; <nl> + REQUIRE ( res = = false ) ; <nl> + } <nl> + } <nl> + for ( uint64_t i = large - > size ( ) ; i > 0 ; i - - ) { <nl> + bool res = large - > slotEmptied ( ) ; <nl> + if ( ( i - 1 ) < <nl> + static_cast < uint64_t > ( 0 . 125 * static_cast < double > ( large - > size ( ) ) ) ) { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) - 1 ) ; <nl> + REQUIRE ( res = = true ) ; <nl> + } else if ( ( i - 1 ) > static_cast < uint64_t > ( <nl> + 0 . 75 * static_cast < double > ( large - > size ( ) ) ) ) { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) + 1 ) ; <nl> + REQUIRE ( res = = false ) ; <nl> + } else { <nl> + REQUIRE ( large - > idealSize ( ) = = large - > logSize ( ) ) ; <nl> + REQUIRE ( res = = false ) ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> mmm a / tests / Cache / TransactionManager . cpp <nl> ppp b / tests / Cache / TransactionManager . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License .
<nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / TransactionManager . h " <nl> # include " Basics / Common . h " <nl> + # include " Cache / Transaction . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / Transaction . h " <nl> - # include " Cache / TransactionManager . h " <nl> - <nl> # include < stdint . h > <nl> - # include < iostream > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheTransactionManagerTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test transaction term management <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_transaction_term " ) { <nl> - TransactionManager transactions ; <nl> - Transaction * tx1 ; <nl> - Transaction * tx2 ; <nl> - Transaction * tx3 ; <nl> - <nl> - CHECK ( 0ULL = = transactions . term ( ) ) ; <nl> - <nl> - tx1 = transactions . begin ( false ) ; <nl> - CHECK ( 1ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx1 ) ; <nl> - CHECK ( 2ULL = = transactions . term ( ) ) ; <nl> - <nl> - tx1 = transactions . begin ( false ) ; <nl> - CHECK ( 3ULL = = transactions . term ( ) ) ; <nl> - tx2 = transactions . begin ( false ) ; <nl> - CHECK ( 3ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx1 ) ; <nl> - CHECK ( 3ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx2 ) ; <nl> - CHECK ( 4ULL = = transactions . term ( ) ) ; <nl> - <nl> - tx1 = transactions . begin ( true ) ; <nl> - CHECK ( 4ULL = = transactions . term ( ) ) ; <nl> - tx2 = transactions . begin ( false ) ; <nl> - CHECK ( 5ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx2 ) ; <nl> - CHECK ( 5ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx1 ) ; <nl> - CHECK ( 6ULL = = transactions . term ( ) ) ; <nl> - <nl> - tx1 = transactions . begin ( true ) ; <nl> - CHECK ( 6ULL = = transactions . term ( ) ) ; <nl> - tx2 = transactions . begin ( false ) ; <nl> - CHECK ( 7ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx2 ) ; <nl> - CHECK ( 7ULL = = transactions . 
term ( ) ) ; <nl> - tx3 = transactions . begin ( true ) ; <nl> - CHECK ( 7ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx1 ) ; <nl> - CHECK ( 8ULL = = transactions . term ( ) ) ; <nl> - transactions . end ( tx3 ) ; <nl> - CHECK ( 8ULL = = transactions . term ( ) ) ; <nl> + TEST_CASE ( " cache : : TransactionManager " , " [ cache ] " ) { <nl> + SECTION ( " verify that transaction term is maintained correctly " ) { <nl> + TransactionManager transactions ; <nl> + Transaction * tx1 ; <nl> + Transaction * tx2 ; <nl> + Transaction * tx3 ; <nl> + <nl> + REQUIRE ( 0ULL = = transactions . term ( ) ) ; <nl> + <nl> + tx1 = transactions . begin ( false ) ; <nl> + REQUIRE ( 1ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx1 ) ; <nl> + REQUIRE ( 2ULL = = transactions . term ( ) ) ; <nl> + <nl> + tx1 = transactions . begin ( false ) ; <nl> + REQUIRE ( 3ULL = = transactions . term ( ) ) ; <nl> + tx2 = transactions . begin ( false ) ; <nl> + REQUIRE ( 3ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx1 ) ; <nl> + REQUIRE ( 3ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx2 ) ; <nl> + REQUIRE ( 4ULL = = transactions . term ( ) ) ; <nl> + <nl> + tx1 = transactions . begin ( true ) ; <nl> + REQUIRE ( 4ULL = = transactions . term ( ) ) ; <nl> + tx2 = transactions . begin ( false ) ; <nl> + REQUIRE ( 5ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx2 ) ; <nl> + REQUIRE ( 5ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx1 ) ; <nl> + REQUIRE ( 6ULL = = transactions . term ( ) ) ; <nl> + <nl> + tx1 = transactions . begin ( true ) ; <nl> + REQUIRE ( 6ULL = = transactions . term ( ) ) ; <nl> + tx2 = transactions . begin ( false ) ; <nl> + REQUIRE ( 7ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx2 ) ; <nl> + REQUIRE ( 7ULL = = transactions . term ( ) ) ; <nl> + tx3 = transactions . begin ( true ) ; <nl> + REQUIRE ( 7ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx1 ) ; <nl> + REQUIRE ( 8ULL = = transactions . term ( ) ) ; <nl> + transactions . end ( tx3 ) ; <nl> + REQUIRE ( 8ULL = = transactions . term ( ) ) ; <nl> + } <nl> } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / TransactionalBucket . cpp <nl> ppp b / tests / Cache / TransactionalBucket . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . 
<nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / TransactionalBucket . h " <nl> # include " Basics / Common . h " <nl> <nl> # include " catch . hpp " <nl> <nl> - # include " Cache / TransactionalBucket . h " <nl> - <nl> # include < stdint . h > <nl> # include < string > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheTransactionalBucketTest " , " [ cache ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test lock methods <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_locks " ) { <nl> - TransactionalBucket bucket ; <nl> - bool success ; <nl> - <nl> - / / check lock without contention <nl> - CHECK ( ! bucket . isLocked ( ) ) ; <nl> - success = bucket . lock ( 0ULL , - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - CHECK ( bucket . isLocked ( ) ) ; <nl> - <nl> - / / check lock with contention <nl> - success = bucket . lock ( 0ULL , 10LL ) ; <nl> - CHECK ( ! success ) ; <nl> - CHECK ( bucket . isLocked ( ) ) ; <nl> - <nl> - / / check unlock <nl> - bucket . unlock ( ) ; <nl> - CHECK ( ! bucket . isLocked ( ) ) ; <nl> - <nl> - / / check that blacklist term is updated appropriately <nl> - CHECK ( 0ULL = = bucket . _blacklistTerm ) ; <nl> - bucket . lock ( 1ULL , - 1LL ) ; <nl> - CHECK ( 1ULL = = bucket . _blacklistTerm ) ; <nl> - bucket . unlock ( ) ; <nl> - CHECK ( 1ULL = = bucket . 
_blacklistTerm ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test insertion to full and fail beyond <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_insertion " ) { <nl> - TransactionalBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 4 ] = { <nl> - 1 , 2 , 3 , 4 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> - uint64_t values [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> - CachedValue * ptrs [ 4 ] ; <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> + TEST_CASE ( " cache : : TransactionalBucket " , " [ cache ] " ) { <nl> + SECTION ( " test locking behavior " ) { <nl> + TransactionalBucket bucket ; <nl> + bool success ; <nl> + <nl> + / / check lock without contention <nl> + REQUIRE ( ! bucket . isLocked ( ) ) ; <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> + REQUIRE ( bucket . isLocked ( ) ) ; <nl> + <nl> + / / check lock with contention <nl> + success = bucket . lock ( 10LL ) ; <nl> + REQUIRE ( ! success ) ; <nl> + REQUIRE ( bucket . isLocked ( ) ) ; <nl> + <nl> + / / check unlock <nl> + bucket . unlock ( ) ; <nl> + REQUIRE ( ! bucket . isLocked ( ) ) ; <nl> + <nl> + / / check that blacklist term is updated appropriately <nl> + REQUIRE ( 0ULL = = bucket . _blacklistTerm ) ; <nl> + bucket . lock ( - 1LL ) ; <nl> + bucket . updateBlacklistTerm ( 1ULL ) ; <nl> + REQUIRE ( 1ULL = = bucket . _blacklistTerm ) ; <nl> + bucket . unlock ( ) ; <nl> + REQUIRE ( 1ULL = = bucket . _blacklistTerm ) ; <nl> } <nl> <nl> - success = bucket . lock ( 0 , - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - / / insert three to fill <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - if ( i < 2 ) { <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - } else { <nl> - CHECK ( bucket . isFull ( ) ) ; <nl> + SECTION ( " verify that insertion works as expected " ) { <nl> + TransactionalBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 4 ] = { <nl> + 1 , 2 , 3 , <nl> + 4 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> + uint64_t values [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> + CachedValue * ptrs [ 4 ] ; <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> } <nl> - } <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - CachedValue * res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> - <nl> - / / check that insert is ignored if full <nl> - bucket . insert ( hashes [ 3 ] , ptrs [ 3 ] ) ; <nl> - CachedValue * res = bucket . find ( hashes [ 3 ] , ptrs [ 3 ] - > key ( ) , ptrs [ 3 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - <nl> - bucket . 
unlock ( ) ; <nl> - <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> - } <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test removal <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - SECTION ( " tst_removal " ) { <nl> - TransactionalBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 3 ] = { <nl> - 1 , 2 , 3 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 3 ] = { 0 , 1 , 2 } ; <nl> - uint64_t values [ 3 ] = { 0 , 1 , 2 } ; <nl> - CachedValue * ptrs [ 3 ] ; <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> + <nl> + / / insert three to fill <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + if ( i < 2 ) { <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + } else { <nl> + REQUIRE ( bucket . isFull ( ) ) ; <nl> + } <nl> + } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> + } <nl> <nl> - success = bucket . lock ( 0 , - 1LL ) ; <nl> - CHECK ( success ) ; <nl> + / / check that insert is ignored if full <nl> + bucket . insert ( hashes [ 3 ] , ptrs [ 3 ] ) ; <nl> + CachedValue * res = bucket . find ( hashes [ 3 ] , ptrs [ 3 ] - > key ( ) , ptrs [ 3 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - } <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - CachedValue * res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> + bucket . unlock ( ) ; <nl> <nl> - CachedValue * res ; <nl> - res = bucket . remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 1 ] ) ; <nl> - res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - res = bucket . remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 0 ] ) ; <nl> - res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - res = bucket . remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 2 ] ) ; <nl> - res = bucket . find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - <nl> - bucket . 
unlock ( ) ; <nl> - <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test eviction with subsequent insertion <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + SECTION ( " verify that removal works as expected " ) { <nl> + TransactionalBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 3 ] = { <nl> + 1 , 2 , 3 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 3 ] = { 0 , 1 , 2 } ; <nl> + uint64_t values [ 3 ] = { 0 , 1 , 2 } ; <nl> + CachedValue * ptrs [ 3 ] ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - SECTION ( " tst_eviction " ) { <nl> - TransactionalBucket bucket ; <nl> - bool success ; <nl> - <nl> - uint32_t hashes [ 4 ] = { <nl> - 1 , 2 , 3 , 4 } ; / / don ' t have to be real , but should be unique and non - zero <nl> - uint64_t keys [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> - uint64_t values [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> - CachedValue * ptrs [ 4 ] ; <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> <nl> - success = bucket . lock ( 0 , - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - / / insert three to fill <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - if ( i < 2 ) { <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - } else { <nl> - CHECK ( bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> } <nl> - } <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - CachedValue * res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> <nl> - / / check that we get proper eviction candidate <nl> - CachedValue * candidate = bucket . evictionCandidate ( ) ; <nl> - CHECK ( candidate = = ptrs [ 0 ] ) ; <nl> - bucket . evict ( candidate , false ) ; <nl> - CachedValue * res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - <nl> - / / check that we still find the right candidate if not full <nl> - candidate = bucket . evictionCandidate ( ) ; <nl> - CHECK ( candidate = = ptrs [ 1 ] ) ; <nl> - bucket . evict ( candidate , true ) ; <nl> - res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - CHECK ( ! bucket . 
isFull ( ) ) ; <nl> - <nl> - / / check that we can insert now after eviction optimized for insertion <nl> - bucket . insert ( hashes [ 3 ] , ptrs [ 3 ] ) ; <nl> - res = bucket . find ( hashes [ 3 ] , ptrs [ 3 ] - > key ( ) , ptrs [ 3 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 3 ] ) ; <nl> - <nl> - bucket . unlock ( ) ; <nl> - <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> + CachedValue * res ; <nl> + res = bucket . remove ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 1 ] ) ; <nl> + res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + res = bucket . remove ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> + res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + res = bucket . remove ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 2 ] ) ; <nl> + res = bucket . find ( hashes [ 2 ] , ptrs [ 2 ] - > key ( ) , ptrs [ 2 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + <nl> + bucket . unlock ( ) ; <nl> + <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> } <nl> - } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test blacklist methods <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + SECTION ( " verify that eviction works as expected " ) { <nl> + TransactionalBucket bucket ; <nl> + bool success ; <nl> + <nl> + uint32_t hashes [ 4 ] = { <nl> + 1 , 2 , 3 , <nl> + 4 } ; / / don ' t have to be real , but should be unique and non - zero <nl> + uint64_t keys [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> + uint64_t values [ 4 ] = { 0 , 1 , 2 , 3 } ; <nl> + CachedValue * ptrs [ 4 ] ; <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - SECTION ( " tst_blacklist " ) { <nl> - TransactionalBucket bucket ; <nl> - bool success ; <nl> - CachedValue * res ; <nl> - <nl> - uint32_t hashes [ 7 ] = { 1 , 1 , 2 , 3 , <nl> - 4 , 5 , 6 } ; / / don ' t have to be real , want some overlap <nl> - uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> - CachedValue * ptrs [ 6 ] ; <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , & ( values [ i ] ) , <nl> - sizeof ( uint64_t ) ) ; <nl> - } <nl> + success = bucket . lock ( - 1LL ) ; <nl> + REQUIRE ( success ) ; <nl> + <nl> + / / insert three to fill <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + if ( i < 2 ) { <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + } else { <nl> + REQUIRE ( bucket . isFull ( ) ) ; <nl> + } <nl> + } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + CachedValue * res = <nl> + bucket . 
find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> + } <nl> <nl> - success = bucket . lock ( 1ULL , - 1LL ) ; <nl> - CHECK ( success ) ; <nl> - <nl> - / / insert three to fill <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> - if ( i < 2 ) { <nl> - CHECK ( ! bucket . isFull ( ) ) ; <nl> - } else { <nl> - CHECK ( bucket . isFull ( ) ) ; <nl> + / / check that we get proper eviction candidate <nl> + CachedValue * candidate = bucket . evictionCandidate ( ) ; <nl> + REQUIRE ( candidate = = ptrs [ 0 ] ) ; <nl> + bucket . evict ( candidate , false ) ; <nl> + CachedValue * res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + <nl> + / / check that we still find the right candidate if not full <nl> + candidate = bucket . evictionCandidate ( ) ; <nl> + REQUIRE ( candidate = = ptrs [ 1 ] ) ; <nl> + bucket . evict ( candidate , true ) ; <nl> + res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + <nl> + / / check that we can insert now after eviction optimized for insertion <nl> + bucket . insert ( hashes [ 3 ] , ptrs [ 3 ] ) ; <nl> + res = bucket . find ( hashes [ 3 ] , ptrs [ 3 ] - > key ( ) , ptrs [ 3 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 3 ] ) ; <nl> + <nl> + bucket . unlock ( ) ; <nl> + <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 4 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> } <nl> } <nl> - for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> - res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ i ] ) ; <nl> - } <nl> <nl> - / / blacklist 1 - 4 to fill blacklist <nl> - for ( size_t i = 1 ; i < 5 ; i + + ) { <nl> - bucket . blacklist ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - } <nl> - for ( size_t i = 1 ; i < 5 ; i + + ) { <nl> - CHECK ( bucket . isBlacklisted ( hashes [ i ] ) ) ; <nl> - res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - } <nl> - / / verify actually not fully blacklisted <nl> - CHECK ( ! bucket . isFullyBlacklisted ( ) ) ; <nl> - CHECK ( ! bucket . isBlacklisted ( hashes [ 6 ] ) ) ; <nl> - / / verify it didn ' t remove matching hash with non - matching key <nl> - res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( res = = ptrs [ 0 ] ) ; <nl> - <nl> - / / verify we can ' t insert a key with a blacklisted hash <nl> - bucket . insert ( hashes [ 1 ] , ptrs [ 1 ] ) ; <nl> - res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - <nl> - / / proceed to fully blacklist <nl> - bucket . blacklist ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> - CHECK ( bucket . isBlacklisted ( hashes [ 5 ] ) ) ; <nl> - res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> - CHECK ( nullptr = = res ) ; <nl> - / / make sure it still didn ' t remove non - matching key <nl> - res = bucket . 
find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> - CHECK ( ptrs [ 0 ] = = res ) ; <nl> - / / make sure it ' s fully blacklisted <nl> - CHECK ( bucket . isFullyBlacklisted ( ) ) ; <nl> - CHECK ( bucket . isBlacklisted ( hashes [ 6 ] ) ) ; <nl> - <nl> - bucket . unlock ( ) ; <nl> - <nl> - / / check that updating blacklist term clears blacklist <nl> - bucket . lock ( 2ULL , - 1LL ) ; <nl> - CHECK ( ! bucket . isFullyBlacklisted ( ) ) ; <nl> - for ( size_t i = 0 ; i < 7 ; i + + ) { <nl> - CHECK ( ! bucket . isBlacklisted ( hashes [ i ] ) ) ; <nl> - } <nl> - bucket . unlock ( ) ; <nl> + SECTION ( " verify that blacklisting works as expected " ) { <nl> + TransactionalBucket bucket ; <nl> + bool success ; <nl> + CachedValue * res ; <nl> + <nl> + uint32_t hashes [ 7 ] = { 1 , 1 , 2 , 3 , <nl> + 4 , 5 , 6 } ; / / don ' t have to be real , want some overlap <nl> + uint64_t keys [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + uint64_t values [ 6 ] = { 0 , 1 , 2 , 3 , 4 , 5 } ; <nl> + CachedValue * ptrs [ 6 ] ; <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + ptrs [ i ] = CachedValue : : construct ( & ( keys [ i ] ) , sizeof ( uint64_t ) , <nl> + & ( values [ i ] ) , sizeof ( uint64_t ) ) ; <nl> + } <nl> <nl> - / / cleanup <nl> - for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> - delete ptrs [ i ] ; <nl> - } <nl> - } <nl> + success = bucket . lock ( - 1LL ) ; <nl> + bucket . updateBlacklistTerm ( 1ULL ) ; <nl> + REQUIRE ( success ) ; <nl> + <nl> + / / insert three to fill <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + bucket . insert ( hashes [ i ] , ptrs [ i ] ) ; <nl> + if ( i < 2 ) { <nl> + REQUIRE ( ! bucket . isFull ( ) ) ; <nl> + } else { <nl> + REQUIRE ( bucket . isFull ( ) ) ; <nl> + } <nl> + } <nl> + for ( size_t i = 0 ; i < 3 ; i + + ) { <nl> + res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ i ] ) ; <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / blacklist 1 - 4 to fill blacklist <nl> + for ( size_t i = 1 ; i < 5 ; i + + ) { <nl> + bucket . blacklist ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + } <nl> + for ( size_t i = 1 ; i < 5 ; i + + ) { <nl> + REQUIRE ( bucket . isBlacklisted ( hashes [ i ] ) ) ; <nl> + res = bucket . find ( hashes [ i ] , ptrs [ i ] - > key ( ) , ptrs [ i ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + } <nl> + / / verify actually not fully blacklisted <nl> + REQUIRE ( ! bucket . isFullyBlacklisted ( ) ) ; <nl> + REQUIRE ( ! bucket . isBlacklisted ( hashes [ 6 ] ) ) ; <nl> + / / verify it didn ' t remove matching hash with non - matching key <nl> + res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( res = = ptrs [ 0 ] ) ; <nl> + <nl> + / / verify we can ' t insert a key with a blacklisted hash <nl> + bucket . insert ( hashes [ 1 ] , ptrs [ 1 ] ) ; <nl> + res = bucket . find ( hashes [ 1 ] , ptrs [ 1 ] - > key ( ) , ptrs [ 1 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + <nl> + / / proceed to fully blacklist <nl> + bucket . 
blacklist ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> + REQUIRE ( bucket . isBlacklisted ( hashes [ 5 ] ) ) ; <nl> + res = bucket . find ( hashes [ 5 ] , ptrs [ 5 ] - > key ( ) , ptrs [ 5 ] - > keySize ) ; <nl> + REQUIRE ( nullptr = = res ) ; <nl> + / / make sure it still didn ' t remove non - matching key <nl> + res = bucket . find ( hashes [ 0 ] , ptrs [ 0 ] - > key ( ) , ptrs [ 0 ] - > keySize ) ; <nl> + REQUIRE ( ptrs [ 0 ] = = res ) ; <nl> + / / make sure it ' s fully blacklisted <nl> + REQUIRE ( bucket . isFullyBlacklisted ( ) ) ; <nl> + REQUIRE ( bucket . isBlacklisted ( hashes [ 6 ] ) ) ; <nl> + <nl> + bucket . unlock ( ) ; <nl> + <nl> + / / check that updating blacklist term clears blacklist <nl> + bucket . lock ( - 1LL ) ; <nl> + bucket . updateBlacklistTerm ( 2ULL ) ; <nl> + REQUIRE ( ! bucket . isFullyBlacklisted ( ) ) ; <nl> + for ( size_t i = 0 ; i < 7 ; i + + ) { <nl> + REQUIRE ( ! bucket . isBlacklisted ( hashes [ i ] ) ) ; <nl> + } <nl> + bucket . unlock ( ) ; <nl> <nl> + / / cleanup <nl> + for ( size_t i = 0 ; i < 6 ; i + + ) { <nl> + delete ptrs [ i ] ; <nl> + } <nl> + } <nl> } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / TransactionalCache . cpp <nl> ppp b / tests / Cache / TransactionalCache . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> + # include " Cache / TransactionalCache . h " <nl> # include " Basics / Common . h " <nl> - # include " Random / RandomGenerator . h " <nl> - <nl> - # include " catch . hpp " <nl> - <nl> + # include " Cache / Common . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / Transaction . h " <nl> - # include " Cache / TransactionalCache . h " <nl> + # include " Random / RandomGenerator . h " <nl> <nl> # include " MockScheduler . h " <nl> + # include " catch . hpp " <nl> <nl> # include < stdint . 
h > <nl> # include < string > <nl> # include < thread > <nl> # include < vector > <nl> <nl> - # include < iostream > <nl> - <nl> using namespace arangodb ; <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheTransactionalCacheTest " , <nl> - " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test construction ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_st_construction " ) { <nl> - Manager manager ( nullptr , 1024ULL * 1024ULL ) ; <nl> - auto cache1 = manager . createCache ( Manager : : CacheType : : Transactional , <nl> - 256ULL * 1024ULL , false ) ; <nl> - auto cache2 = manager . createCache ( Manager : : CacheType : : Transactional , <nl> - 512ULL * 1024ULL , false ) ; <nl> - <nl> - CHECK ( 0ULL = = cache1 - > usage ( ) ) ; <nl> - CHECK ( 256ULL * 1024ULL = = cache1 - > limit ( ) ) ; <nl> - CHECK ( 0ULL = = cache2 - > usage ( ) ) ; <nl> - CHECK ( 512ULL * 1024ULL > cache2 - > limit ( ) ) ; <nl> - <nl> - manager . destroyCache ( cache1 ) ; <nl> - manager . destroyCache ( cache2 ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test insertion ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_st_insertion " ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , cacheLimit , false ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - } <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - uint64_t j = 2 * i ; <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - CHECK ( 0 = = memcmp ( f . 
value ( ) - > value ( ) , & j , sizeof ( uint64_t ) ) ) ; <nl> + TEST_CASE ( " cache : : TransactionalCache " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> + SECTION ( " test basic cache construction " ) { <nl> + Manager manager ( nullptr , 1024 * 1024 ) ; <nl> + auto cache1 = <nl> + manager . createCache ( CacheType : : Transactional , false , 256 * 1024 ) ; <nl> + auto cache2 = <nl> + manager . createCache ( CacheType : : Transactional , false , 512 * 1024 ) ; <nl> + <nl> + REQUIRE ( 0 = = cache1 - > usage ( ) ) ; <nl> + REQUIRE ( 256 * 1024 > = cache1 - > size ( ) ) ; <nl> + REQUIRE ( 0 = = cache2 - > usage ( ) ) ; <nl> + REQUIRE ( 512 * 1024 > = cache2 - > size ( ) ) ; <nl> + <nl> + manager . destroyCache ( cache1 ) ; <nl> + manager . destroyCache ( cache2 ) ; <nl> } <nl> <nl> - uint64_t notInserted = 0 ; <nl> - for ( uint64_t i = 1024 ; i < 128 * 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( success ) { <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - } else { <nl> - delete value ; <nl> - notInserted + + ; <nl> + SECTION ( " verify that insertion works as expected " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cache = <nl> + manager . createCache ( CacheType : : Transactional , false , cacheLimit ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> } <nl> - } <nl> - CHECK ( notInserted > 0 ) ; <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + uint64_t j = 2 * i ; <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & j , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + REQUIRE ( 0 = = memcmp ( f . value ( ) - > value ( ) , & j , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test removal ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( uint64_t i = 1024 ; i < 256 * 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . 
found ( ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> + REQUIRE ( cache - > size ( ) < = 256 * 1024 ) ; <nl> <nl> - SECTION ( " tst_st_removal " ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , cacheLimit , false ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - CHECK ( f . value ( ) ! = nullptr ) ; <nl> - CHECK ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> + manager . destroyCache ( cache ) ; <nl> } <nl> <nl> - / / test removal of bogus keys <nl> - for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> - bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> - REQUIRE ( removed ) ; <nl> - / / ensure existing keys not removed <nl> + SECTION ( " verify removal works as expected " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cache = <nl> + manager . createCache ( CacheType : : Transactional , false , cacheLimit ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> + } <nl> + uint64_t inserted = 0 ; <nl> for ( uint64_t j = 0 ; j < 1024 ; j + + ) { <nl> auto f = cache - > find ( & j , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - CHECK ( f . value ( ) ! = nullptr ) ; <nl> - CHECK ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> + if ( f . found ( ) ) { <nl> + inserted + + ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> + } <nl> } <nl> - } <nl> - <nl> - / / remove actual keys <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( removed ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( ! f . found ( ) ) ; <nl> - } <nl> - <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test blacklisting ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_st_blacklist " ) { <nl> - uint64_t cacheLimit = 256ULL * 1024ULL ; <nl> - Manager manager ( nullptr , 4ULL * cacheLimit ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , cacheLimit , false ) ; <nl> - <nl> - Transaction * tx = manager . 
beginTransaction ( false ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> - CHECK ( f . value ( ) ! = nullptr ) ; <nl> - CHECK ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> - } <nl> <nl> - for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> - bool success = cache - > blacklist ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( ! f . found ( ) ) ; <nl> - } <nl> - <nl> - for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( ! success ) ; <nl> - delete value ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( ! f . found ( ) ) ; <nl> - } <nl> + / / test removal of bogus keys <nl> + for ( uint64_t i = 1024 ; i < 2048 ; i + + ) { <nl> + bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( removed ) ; <nl> + / / ensure existing keys not removed <nl> + uint64_t found = 0 ; <nl> + for ( uint64_t j = 0 ; j < 1024 ; j + + ) { <nl> + auto f = cache - > find ( & j , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + found + + ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & j , sizeof ( uint64_t ) ) ) ; <nl> + } <nl> + } <nl> + REQUIRE ( inserted = = found ) ; <nl> + } <nl> <nl> - manager . endTransaction ( tx ) ; <nl> - tx = manager . beginTransaction ( false ) ; <nl> + / / remove actual keys <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + bool removed = cache - > remove ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( removed ) ; <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( ! f . found ( ) ) ; <nl> + } <nl> <nl> - for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - CHECK ( success ) ; <nl> - auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> - CHECK ( f . found ( ) ) ; <nl> + manager . destroyCache ( cache ) ; <nl> } <nl> <nl> - manager . endTransaction ( tx ) ; <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> + SECTION ( " verify blacklisting works as expected " ) { <nl> + uint64_t cacheLimit = 256 * 1024 ; <nl> + Manager manager ( nullptr , 4 * cacheLimit ) ; <nl> + auto cache = <nl> + manager . createCache ( CacheType : : Transactional , false , cacheLimit ) ; <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test growth behavior ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + Transaction * tx = manager . 
beginTransaction ( false ) ; <nl> <nl> - SECTION ( " tst_st_growth " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - uint64_t minimumSize = 64ULL * initialSize ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , initialSize , true ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 4ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! success ) { <nl> - delete value ; <nl> + for ( uint64_t i = 0 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + REQUIRE ( f . value ( ) ! = nullptr ) ; <nl> + REQUIRE ( f . value ( ) - > sameKey ( & i , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> } <nl> - } <nl> - <nl> - CHECK ( cache - > usage ( ) > minimumSize ) ; <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test shrink behavior ( single - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> + bool success = cache - > blacklist ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( success ) ; <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( ! f . found ( ) ) ; <nl> + } <nl> <nl> - SECTION ( " tst_st_shrink " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - auto cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , initialSize , true ) ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 16ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! success ) { <nl> + for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + REQUIRE ( ! success ) ; <nl> delete value ; <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( ! f . found ( ) ) ; <nl> } <nl> - } <nl> <nl> - cache - > disableGrowth ( ) ; <nl> - uint64_t target = cache - > usage ( ) / 2 ; <nl> - while ( ! cache - > resize ( target ) ) { <nl> - } ; <nl> - <nl> - for ( uint64_t i = 0 ; i < 16ULL * 1024ULL * 1024ULL ; i + + ) { <nl> - CachedValue * value = <nl> - CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> - bool success = cache - > insert ( value ) ; <nl> - if ( ! 
success ) { <nl> - delete value ; <nl> + manager . endTransaction ( tx ) ; <nl> + tx = manager . beginTransaction ( false ) ; <nl> + <nl> + uint64_t reinserted = 0 ; <nl> + for ( uint64_t i = 512 ; i < 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( success ) { <nl> + reinserted + + ; <nl> + auto f = cache - > find ( & i , sizeof ( uint64_t ) ) ; <nl> + REQUIRE ( f . found ( ) ) ; <nl> + } else { <nl> + delete value ; <nl> + } <nl> } <nl> - } <nl> + REQUIRE ( reinserted > = 256 ) ; <nl> <nl> - while ( cache - > isResizing ( ) ) { <nl> + manager . endTransaction ( tx ) ; <nl> + manager . destroyCache ( cache ) ; <nl> } <nl> - CHECK ( cache - > usage ( ) < = target ) ; <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test mixed load behavior ( multi - threaded ) <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_mt_mixed_load " ) { <nl> - uint64_t initialSize = 16ULL * 1024ULL ; <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 1024ULL * 1024ULL * 1024ULL ) ; <nl> - size_t threadCount = 4 ; <nl> - std : : shared_ptr < Cache > cache = <nl> - manager . createCache ( Manager : : CacheType : : Transactional , initialSize , true ) ; <nl> - <nl> - uint64_t chunkSize = 16 * 1024 * 1024 ; <nl> - uint64_t initialInserts = 4 * 1024 * 1024 ; <nl> - uint64_t operationCount = 16 * 1024 * 1024 ; <nl> - std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> - std : : atomic < uint64_t > missCount ( 0 ) ; <nl> - auto worker = [ & manager , & cache , initialInserts , operationCount , & hitCount , <nl> - & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> - Transaction * tx = manager . beginTransaction ( false ) ; <nl> - / / fill with some initial data <nl> - for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> - uint64_t item = lower + i ; <nl> - CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> - & item , sizeof ( uint64_t ) ) ; <nl> - bool ok = cache - > insert ( value ) ; <nl> - if ( ! ok ) { <nl> + SECTION ( " verify cache can grow correctly when it runs out of space " ) { <nl> + uint64_t minimumUsage = 1024 * 1024 ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024 * 1024 * 1024 ) ; <nl> + auto cache = manager . createCache ( CacheType : : Transactional ) ; <nl> + <nl> + for ( uint64_t i = 0 ; i < 4 * 1024 * 1024 ; i + + ) { <nl> + CachedValue * value = <nl> + CachedValue : : construct ( & i , sizeof ( uint64_t ) , & i , sizeof ( uint64_t ) ) ; <nl> + bool success = cache - > insert ( value ) ; <nl> + if ( ! 
success ) { <nl> delete value ; <nl> } <nl> } <nl> <nl> - / / initialize valid range for keys that * might * be in cache <nl> - uint64_t validLower = lower ; <nl> - uint64_t validUpper = lower + initialInserts - 1 ; <nl> - uint64_t blacklistUpper = validUpper ; <nl> - <nl> - / / commence mixed workload <nl> - for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + REQUIRE ( cache - > usage ( ) > minimumUsage ) ; <nl> <nl> - if ( r > = 99 ) { / / remove something <nl> - if ( validLower = = validUpper ) { <nl> - continue ; / / removed too much <nl> - } <nl> - <nl> - uint64_t item = validLower + + ; <nl> - <nl> - cache - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> - } else if ( r > = 90 ) { / / insert something <nl> - if ( validUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> + manager . destroyCache ( cache ) ; <nl> + } <nl> <nl> - uint64_t item = + + validUpper ; <nl> - if ( validUpper > blacklistUpper ) { <nl> - blacklistUpper = validUpper ; <nl> - } <nl> + SECTION ( " test behavior under mixed load " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 1024 * 1024 * 1024 ) ; <nl> + size_t threadCount = 4 ; <nl> + std : : shared_ptr < Cache > cache = <nl> + manager . createCache ( CacheType : : Transactional ) ; <nl> + <nl> + uint64_t chunkSize = 16 * 1024 * 1024 ; <nl> + uint64_t initialInserts = 4 * 1024 * 1024 ; <nl> + uint64_t operationCount = 16 * 1024 * 1024 ; <nl> + std : : atomic < uint64_t > hitCount ( 0 ) ; <nl> + std : : atomic < uint64_t > missCount ( 0 ) ; <nl> + auto worker = [ & manager , & cache , initialInserts , operationCount , & hitCount , <nl> + & missCount ] ( uint64_t lower , uint64_t upper ) - > void { <nl> + Transaction * tx = manager . beginTransaction ( false ) ; <nl> + / / fill with some initial data <nl> + for ( uint64_t i = 0 ; i < initialInserts ; i + + ) { <nl> + uint64_t item = lower + i ; <nl> CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> & item , sizeof ( uint64_t ) ) ; <nl> bool ok = cache - > insert ( value ) ; <nl> if ( ! ok ) { <nl> delete value ; <nl> } <nl> - } else if ( r > = 80 ) { / / blacklist something <nl> - if ( blacklistUpper = = upper ) { <nl> - continue ; / / already maxed out range <nl> - } <nl> - <nl> - uint64_t item = + + blacklistUpper ; <nl> - cache - > blacklist ( & item , sizeof ( uint64_t ) ) ; <nl> - } else { / / lookup something <nl> - uint64_t item = RandomGenerator : : interval ( <nl> - static_cast < int64_t > ( validLower ) , static_cast < int64_t > ( validUpper ) ) ; <nl> + } <nl> <nl> - Cache : : Finding f = cache - > find ( & item , sizeof ( uint64_t ) ) ; <nl> - if ( f . found ( ) ) { <nl> - hitCount + + ; <nl> - TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> - TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> - } else { <nl> - missCount + + ; <nl> - TRI_ASSERT ( f . 
value ( ) = = nullptr ) ; <nl> + / / initialize valid range for keys that * might * be in cache <nl> + uint64_t validLower = lower ; <nl> + uint64_t validUpper = lower + initialInserts - 1 ; <nl> + uint64_t blacklistUpper = validUpper ; <nl> + <nl> + / / commence mixed workload <nl> + for ( uint64_t i = 0 ; i < operationCount ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99UL ) ) ; <nl> + <nl> + if ( r > = 99 ) { / / remove something <nl> + if ( validLower = = validUpper ) { <nl> + continue ; / / removed too much <nl> + } <nl> + <nl> + uint64_t item = validLower + + ; <nl> + <nl> + cache - > remove ( & item , sizeof ( uint64_t ) ) ; <nl> + } else if ( r > = 90 ) { / / insert something <nl> + if ( validUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + validUpper ; <nl> + if ( validUpper > blacklistUpper ) { <nl> + blacklistUpper = validUpper ; <nl> + } <nl> + CachedValue * value = CachedValue : : construct ( & item , sizeof ( uint64_t ) , <nl> + & item , sizeof ( uint64_t ) ) ; <nl> + bool ok = cache - > insert ( value ) ; <nl> + if ( ! ok ) { <nl> + delete value ; <nl> + } <nl> + } else if ( r > = 80 ) { / / blacklist something <nl> + if ( blacklistUpper = = upper ) { <nl> + continue ; / / already maxed out range <nl> + } <nl> + <nl> + uint64_t item = + + blacklistUpper ; <nl> + cache - > blacklist ( & item , sizeof ( uint64_t ) ) ; <nl> + } else { / / lookup something <nl> + uint64_t item = <nl> + RandomGenerator : : interval ( static_cast < int64_t > ( validLower ) , <nl> + static_cast < int64_t > ( validUpper ) ) ; <nl> + <nl> + Cache : : Finding f = cache - > find ( & item , sizeof ( uint64_t ) ) ; <nl> + if ( f . found ( ) ) { <nl> + hitCount + + ; <nl> + TRI_ASSERT ( f . value ( ) ! = nullptr ) ; <nl> + TRI_ASSERT ( f . value ( ) - > sameKey ( & item , sizeof ( uint64_t ) ) ) ; <nl> + } else { <nl> + missCount + + ; <nl> + TRI_ASSERT ( f . value ( ) = = nullptr ) ; <nl> + } <nl> } <nl> } <nl> + manager . endTransaction ( tx ) ; <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + uint64_t lower = i * chunkSize ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> + threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> } <nl> - manager . endTransaction ( tx ) ; <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - uint64_t lower = i * chunkSize ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) - 1 ; <nl> - threads . push_back ( new std : : thread ( worker , lower , upper ) ) ; <nl> - } <nl> - <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> <nl> - manager . destroyCache ( cache ) ; <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> + manager . 
destroyCache ( cache ) ; <nl> + RandomGenerator : : shutdown ( ) ; <nl> + } <nl> } <nl> - <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> mmm a / tests / Cache / TransactionalStore . cpp <nl> ppp b / tests / Cache / TransactionalStore . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " TransactionalStore . h " <nl> # include " Basics / Common . h " <nl> # include " Basics / StringBuffer . h " <nl> # include " Basics / files . h " <nl> + # include " Cache / Common . h " <nl> # include " Cache / Manager . h " <nl> # include " Cache / TransactionalCache . h " <nl> <nl> <nl> # include < rocksdb / utilities / transaction_db . h > <nl> <nl> # include < chrono > <nl> - # include < iostream > <nl> <nl> using namespace arangodb : : cache ; <nl> <nl> TransactionalStore : : TransactionalStore ( Manager * manager ) <nl> _writeOptions ( rocksdb : : WriteOptions ( ) ) , <nl> _txOptions ( rocksdb : : TransactionOptions ( ) ) { <nl> TRI_ASSERT ( manager ! = nullptr ) ; <nl> - _cache = manager - > createCache ( Manager : : CacheType : : Transactional , 1024 * 1024 , <nl> - true , true ) ; <nl> + _cache = manager - > createCache ( CacheType : : Transactional , true ) ; <nl> TRI_ASSERT ( _cache . get ( ) ! = nullptr ) ; <nl> <nl> _directory . appendText ( TRI_GetTempPath ( ) ) ; <nl> TransactionalStore : : TransactionalStore ( Manager * manager ) <nl> auto status = rocksdb : : TransactionDB : : Open ( <nl> options , rocksdb : : TransactionDBOptions ( ) , _directory . c_str ( ) , & _db ) ; <nl> if ( ! status . ok ( ) ) { <nl> - std : : cerr < < status . ToString ( ) < < std : : endl ; <nl> + throw ; <nl> } <nl> TRI_ASSERT ( status . ok ( ) ) ; <nl> } <nl> mmm a / tests / Cache / TransactionalStore . h <nl> ppp b / tests / Cache / TransactionalStore . h <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . 
<nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # ifndef UNITTESTS_CACHE_TRANSACTIONAL_STORE_H <nl> mmm a / tests / Cache / TransactionsWithBackingStore . cpp <nl> ppp b / tests / Cache / TransactionsWithBackingStore . cpp <nl> <nl> / / / <nl> / / / DISCLAIMER <nl> / / / <nl> - / / / Copyright 2017 triagens GmbH , Cologne , Germany <nl> + / / / Copyright 2017 ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / Licensed under the Apache License , Version 2 . 0 ( the " License " ) ; <nl> / / / you may not use this file except in compliance with the License . <nl> <nl> / / / See the License for the specific language governing permissions and <nl> / / / limitations under the License . <nl> / / / <nl> - / / / Copyright holder is triAGENS GmbH , Cologne , Germany <nl> + / / / Copyright holder is ArangoDB GmbH , Cologne , Germany <nl> / / / <nl> / / / @ author Daniel H . Larkin <nl> - / / / @ author Copyright 2017 , triAGENS GmbH , Cologne , Germany <nl> + / / / @ author Copyright 2017 , ArangoDB GmbH , Cologne , Germany <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> # include " Basics / Common . h " <nl> - <nl> - # include " catch . hpp " <nl> - <nl> # include " Cache / Manager . h " <nl> # include " Cache / Rebalancer . h " <nl> # include " Random / RandomGenerator . h " <nl> <nl> # include " MockScheduler . h " <nl> # include " TransactionalStore . h " <nl> + # include " catch . hpp " <nl> <nl> # include < stdint . h > <nl> # include < chrono > <nl> - # include < iostream > <nl> # include < thread > <nl> # include < vector > <nl> <nl> using namespace arangodb ; <nl> using namespace arangodb : : cache ; <nl> <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - / / - - SECTION - - test suite <nl> - / / mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - - <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief setup <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - TEST_CASE ( " CCacheTransactionsWithBackingStoreTest " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> - <nl> / * <nl> Planned Tests <nl> = = = = = = = = = = = = = <nl> reader gets . 
<nl> - Have writers sleep a while between transactions <nl> - Have readers read a set of documents within a transaction <nl> * / <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test hit rate for read - only hotset workload <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_single_readonly_hotset " ) { <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 16 * 1024 * 1024 ) ; <nl> - TransactionalStore store ( & manager ) ; <nl> - uint64_t totalDocuments = 1000000 ; <nl> - uint64_t hotsetSize = 50000 ; <nl> - size_t threadCount = 4 ; <nl> - uint64_t lookupsPerThread = 1000000 ; <nl> - <nl> - / / initial fill <nl> - for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> - store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> - } <nl> - <nl> - auto worker = [ & store , hotsetSize , totalDocuments , <nl> - lookupsPerThread ] ( ) - > void { <nl> - for ( uint64_t i = 0 ; i < lookupsPerThread ; i + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99 ) ) ; <nl> - uint64_t choice = ( r > = 90 ) ? RandomGenerator : : interval ( totalDocuments ) <nl> - : RandomGenerator : : interval ( hotsetSize ) ; <nl> - if ( choice = = 0 ) { <nl> - choice = 1 ; <nl> - } <nl> - <nl> - auto d = store . lookup ( nullptr , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + TEST_CASE ( " cache with backing store " , " [ cache ] [ ! hide ] [ longRunning ] " ) { <nl> + SECTION ( " test hit rate for read - only hotset workload " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 16 * 1024 * 1024 ) ; <nl> + TransactionalStore store ( & manager ) ; <nl> + uint64_t totalDocuments = 1000000 ; <nl> + uint64_t hotsetSize = 50000 ; <nl> + size_t threadCount = 4 ; <nl> + uint64_t lookupsPerThread = 1000000 ; <nl> + <nl> + / / initial fill <nl> + for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> + store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> } <nl> - } ; <nl> - <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch threads <nl> - for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> - threads . push_back ( new std : : thread ( worker ) ) ; <nl> - } <nl> - <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> - <nl> - auto hitRates = manager . globalHitRates ( ) ; <nl> - CHECK ( hitRates . first > = 65 . 0 ) ; <nl> - CHECK ( hitRates . second > = 85 . 
0 ) ; <nl> - <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test hit rate for mixed workload <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> - SECTION ( " tst_single_mixed_hitrate " ) { <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 256 * 1024 * 1024 ) ; <nl> - TransactionalStore store ( & manager ) ; <nl> - uint64_t totalDocuments = 1000000 ; <nl> - uint64_t batchSize = 1000 ; <nl> - size_t readerCount = 4 ; <nl> - size_t writerCount = 2 ; <nl> - std : : atomic < size_t > writersDone ( 0 ) ; <nl> - auto writeWaitInterval = std : : chrono : : milliseconds ( 10 ) ; <nl> - <nl> - / / initial fill <nl> - for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> - store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> - } <nl> + auto worker = [ & store , hotsetSize , totalDocuments , <nl> + lookupsPerThread ] ( ) - > void { <nl> + for ( uint64_t i = 0 ; i < lookupsPerThread ; i + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99 ) ) ; <nl> + uint64_t choice = ( r > = 90 ) ? RandomGenerator : : interval ( totalDocuments ) <nl> + : RandomGenerator : : interval ( hotsetSize ) ; <nl> + if ( choice = = 0 ) { <nl> + choice = 1 ; <nl> + } <nl> <nl> - auto readWorker = [ & store , & writersDone , writerCount , <nl> - totalDocuments ] ( ) - > void { <nl> - while ( writersDone . load ( ) < writerCount ) { <nl> - uint64_t choice = RandomGenerator : : interval ( totalDocuments ) ; <nl> - if ( choice = = 0 ) { <nl> - choice = 1 ; <nl> + auto d = store . lookup ( nullptr , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> } <nl> + } ; <nl> <nl> - auto d = store . lookup ( nullptr , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch threads <nl> + for ( size_t i = 0 ; i < threadCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( worker ) ) ; <nl> } <nl> - } ; <nl> - <nl> - auto writeWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> - batchSize , & writeWaitInterval ] ( uint64_t lower , <nl> - uint64_t upper ) - > void { <nl> - uint64_t batches = ( upper + 1 - lower ) / batchSize ; <nl> - uint64_t choice = lower ; <nl> - for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> - auto tx = store . beginTransaction ( false ) ; <nl> - for ( uint64_t i = 0 ; i < batchSize ; i + + ) { <nl> - auto d = store . lookup ( tx , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> - d . advance ( ) ; <nl> - bool ok = store . update ( tx , d ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> - choice + + ; <nl> - } <nl> - bool ok = store . commit ( tx ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> - std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> + <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> } <nl> - writersDone + + ; <nl> - } ; <nl> <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch reader threads <nl> - for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> - threads . 
push_back ( new std : : thread ( readWorker ) ) ; <nl> - } <nl> - / / dispatch writer threads <nl> - uint64_t chunkSize = totalDocuments / writerCount ; <nl> - for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> - uint64_t lower = ( i * chunkSize ) + 1 ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> - threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> - } <nl> + auto hitRates = manager . globalHitRates ( ) ; <nl> + REQUIRE ( hitRates . first > = 65 . 0 ) ; <nl> + REQUIRE ( hitRates . second > = 85 . 0 ) ; <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> + RandomGenerator : : shutdown ( ) ; <nl> } <nl> <nl> - auto hitRates = manager . globalHitRates ( ) ; <nl> - CHECK ( hitRates . first > = 40 . 0 ) ; <nl> - CHECK ( hitRates . second > = 60 . 0 ) ; <nl> - <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> - <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test transactionality for mixed workload <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - <nl> - SECTION ( " tst_single_mixed_transactionality " ) { <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 256 * 1024 * 1024 ) ; <nl> - TransactionalStore store ( & manager ) ; <nl> - uint64_t totalDocuments = 1000000 ; <nl> - uint64_t writeBatchSize = 1000 ; <nl> - uint64_t readBatchSize = 10000 ; <nl> - size_t readerCount = 4 ; <nl> - size_t writerCount = 2 ; <nl> - std : : atomic < size_t > writersDone ( 0 ) ; <nl> - auto writeWaitInterval = std : : chrono : : milliseconds ( 10 ) ; <nl> - <nl> - / / initial fill <nl> - for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> - store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> - } <nl> + SECTION ( " test hit rate for mixed workload " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 256 * 1024 * 1024 ) ; <nl> + TransactionalStore store ( & manager ) ; <nl> + uint64_t totalDocuments = 1000000 ; <nl> + uint64_t batchSize = 1000 ; <nl> + size_t readerCount = 4 ; <nl> + size_t writerCount = 2 ; <nl> + std : : atomic < size_t > writersDone ( 0 ) ; <nl> + auto writeWaitInterval = std : : chrono : : milliseconds ( 10 ) ; <nl> + <nl> + / / initial fill <nl> + for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> + store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> + } <nl> <nl> - auto readWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> - readBatchSize ] ( ) - > void { <nl> - while ( writersDone . load ( ) < writerCount ) { <nl> - auto tx = store . beginTransaction ( true ) ; <nl> - uint64_t start = static_cast < uint64_t > ( <nl> - std : : chrono : : steady_clock : : now ( ) . time_since_epoch ( ) . count ( ) ) ; <nl> - for ( uint64_t i = 0 ; i < readBatchSize ; i + + ) { <nl> + auto readWorker = [ & store , & writersDone , writerCount , <nl> + totalDocuments ] ( ) - > void { <nl> + while ( writersDone . 
load ( ) < writerCount ) { <nl> uint64_t choice = RandomGenerator : : interval ( totalDocuments ) ; <nl> if ( choice = = 0 ) { <nl> choice = 1 ; <nl> } <nl> <nl> - auto d = store . lookup ( tx , choice ) ; <nl> + auto d = store . lookup ( nullptr , choice ) ; <nl> TRI_ASSERT ( ! d . empty ( ) ) ; <nl> - TRI_ASSERT ( d . timestamp < = start ) ; / / transactionality <nl> } <nl> - bool ok = store . commit ( tx ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> - } <nl> - } ; <nl> - <nl> - auto writeWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> - writeBatchSize , & writeWaitInterval ] ( <nl> - uint64_t lower , uint64_t upper ) - > void { <nl> - uint64_t batches = ( upper + 1 - lower ) / writeBatchSize ; <nl> - uint64_t choice = lower ; <nl> - for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> - auto tx = store . beginTransaction ( false ) ; <nl> - for ( uint64_t i = 0 ; i < writeBatchSize ; i + + ) { <nl> - auto d = store . lookup ( tx , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> - d . advance ( ) ; <nl> - bool ok = store . update ( tx , d ) ; <nl> + } ; <nl> + <nl> + auto writeWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> + batchSize , & writeWaitInterval ] ( uint64_t lower , <nl> + uint64_t upper ) - > void { <nl> + uint64_t batches = ( upper + 1 - lower ) / batchSize ; <nl> + uint64_t choice = lower ; <nl> + for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> + auto tx = store . beginTransaction ( false ) ; <nl> + for ( uint64_t i = 0 ; i < batchSize ; i + + ) { <nl> + auto d = store . lookup ( tx , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + d . advance ( ) ; <nl> + bool ok = store . update ( tx , d ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + choice + + ; <nl> + } <nl> + bool ok = store . commit ( tx ) ; <nl> TRI_ASSERT ( ok ) ; <nl> - choice + + ; <nl> + std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> } <nl> - bool ok = store . commit ( tx ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> - std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> + writersDone + + ; <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch reader threads <nl> + for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> + } <nl> + / / dispatch writer threads <nl> + uint64_t chunkSize = totalDocuments / writerCount ; <nl> + for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> + uint64_t lower = ( i * chunkSize ) + 1 ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> + threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> } <nl> - writersDone + + ; <nl> - } ; <nl> <nl> - std : : vector < std : : thread * > threads ; <nl> - / / dispatch reader threads <nl> - for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> - threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> - } <nl> - / / dispatch writer threads <nl> - uint64_t chunkSize = totalDocuments / writerCount ; <nl> - for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> - uint64_t lower = ( i * chunkSize ) + 1 ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> - threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> - } <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> + auto hitRates = manager . 
globalHitRates ( ) ; <nl> + REQUIRE ( hitRates . first > = 35 . 0 ) ; <nl> + REQUIRE ( hitRates . second > = 50 . 0 ) ; <nl> <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> + RandomGenerator : : shutdown ( ) ; <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief test rebalancing in the wild <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + SECTION ( " test transactionality for mixed workload " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . ioService ( ) , 256 * 1024 * 1024 ) ; <nl> + TransactionalStore store ( & manager ) ; <nl> + uint64_t totalDocuments = 1000000 ; <nl> + uint64_t writeBatchSize = 1000 ; <nl> + uint64_t readBatchSize = 10000 ; <nl> + size_t readerCount = 4 ; <nl> + size_t writerCount = 2 ; <nl> + std : : atomic < size_t > writersDone ( 0 ) ; <nl> + auto writeWaitInterval = std : : chrono : : milliseconds ( 10 ) ; <nl> + <nl> + / / initial fill <nl> + for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> + store . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> + } <nl> <nl> - SECTION ( " tst_multi_rebalancing " ) { <nl> - RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> - MockScheduler scheduler ( 4 ) ; <nl> - Manager manager ( scheduler . ioService ( ) , 16 * 1024 * 1024 ) ; <nl> - Rebalancer rebalancer ( & manager ) ; <nl> - TransactionalStore store1 ( & manager ) ; <nl> - TransactionalStore store2 ( & manager ) ; <nl> - uint64_t totalDocuments = 1000000 ; <nl> - uint64_t writeBatchSize = 1000 ; <nl> - uint64_t readBatchSize = 100 ; <nl> - size_t readerCount = 4 ; <nl> - size_t writerCount = 2 ; <nl> - std : : atomic < size_t > writersDone ( 0 ) ; <nl> - auto writeWaitInterval = std : : chrono : : milliseconds ( 50 ) ; <nl> - uint32_t storeBias ; <nl> - <nl> - bool doneRebalancing = false ; <nl> - auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> - while ( ! doneRebalancing ) { <nl> - bool rebalanced = rebalancer . rebalance ( ) ; <nl> - if ( rebalanced ) { <nl> - std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> - } else { <nl> - std : : this_thread : : sleep_for ( std : : chrono : : microseconds ( 100 ) ) ; <nl> + auto readWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> + readBatchSize ] ( ) - > void { <nl> + while ( writersDone . load ( ) < writerCount ) { <nl> + auto tx = store . beginTransaction ( true ) ; <nl> + uint64_t start = static_cast < uint64_t > ( <nl> + std : : chrono : : steady_clock : : now ( ) . time_since_epoch ( ) . count ( ) ) ; <nl> + for ( uint64_t i = 0 ; i < readBatchSize ; i + + ) { <nl> + uint64_t choice = RandomGenerator : : interval ( totalDocuments ) ; <nl> + if ( choice = = 0 ) { <nl> + choice = 1 ; <nl> + } <nl> + <nl> + auto d = store . lookup ( tx , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + TRI_ASSERT ( d . timestamp < = start ) ; / / transactionality <nl> + } <nl> + bool ok = store . 
commit ( tx ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + } <nl> + } ; <nl> + <nl> + auto writeWorker = [ & store , & writersDone , writerCount , totalDocuments , <nl> + writeBatchSize , & writeWaitInterval ] ( <nl> + uint64_t lower , uint64_t upper ) - > void { <nl> + uint64_t batches = ( upper + 1 - lower ) / writeBatchSize ; <nl> + uint64_t choice = lower ; <nl> + for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> + auto tx = store . beginTransaction ( false ) ; <nl> + for ( uint64_t i = 0 ; i < writeBatchSize ; i + + ) { <nl> + auto d = store . lookup ( tx , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + d . advance ( ) ; <nl> + bool ok = store . update ( tx , d ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + choice + + ; <nl> + } <nl> + bool ok = store . commit ( tx ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> } <nl> + writersDone + + ; <nl> + } ; <nl> + <nl> + std : : vector < std : : thread * > threads ; <nl> + / / dispatch reader threads <nl> + for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> + } <nl> + / / dispatch writer threads <nl> + uint64_t chunkSize = totalDocuments / writerCount ; <nl> + for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> + uint64_t lower = ( i * chunkSize ) + 1 ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> + threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> + } <nl> + <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> } <nl> - } ; <nl> - auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> <nl> - / / initial fill <nl> - for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> - store1 . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> - store2 . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> + RandomGenerator : : shutdown ( ) ; <nl> } <nl> <nl> - auto readWorker = [ & store1 , & store2 , & storeBias , & writersDone , writerCount , <nl> - totalDocuments , readBatchSize ] ( ) - > void { <nl> - while ( writersDone . load ( ) < writerCount ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99 ) ) ; <nl> - TransactionalStore * store = ( r < = storeBias ) ? & store1 : & store2 ; <nl> - auto tx = store - > beginTransaction ( true ) ; <nl> - uint64_t start = static_cast < uint64_t > ( <nl> - std : : chrono : : steady_clock : : now ( ) . time_since_epoch ( ) . count ( ) ) ; <nl> - for ( uint64_t i = 0 ; i < readBatchSize ; i + + ) { <nl> - uint64_t choice = RandomGenerator : : interval ( totalDocuments ) ; <nl> - if ( choice = = 0 ) { <nl> - choice = 1 ; <nl> + SECTION ( " test rebalancing in the wild " ) { <nl> + RandomGenerator : : initialize ( RandomGenerator : : RandomType : : MERSENNE ) ; <nl> + MockScheduler scheduler ( 4 ) ; <nl> + Manager manager ( scheduler . 
ioService ( ) , 16 * 1024 * 1024 ) ; <nl> + Rebalancer rebalancer ( & manager ) ; <nl> + TransactionalStore store1 ( & manager ) ; <nl> + TransactionalStore store2 ( & manager ) ; <nl> + uint64_t totalDocuments = 1000000 ; <nl> + uint64_t writeBatchSize = 1000 ; <nl> + uint64_t readBatchSize = 100 ; <nl> + size_t readerCount = 4 ; <nl> + size_t writerCount = 2 ; <nl> + std : : atomic < size_t > writersDone ( 0 ) ; <nl> + auto writeWaitInterval = std : : chrono : : milliseconds ( 50 ) ; <nl> + uint32_t storeBias ; <nl> + <nl> + bool doneRebalancing = false ; <nl> + auto rebalanceWorker = [ & rebalancer , & doneRebalancing ] ( ) - > void { <nl> + while ( ! doneRebalancing ) { <nl> + bool rebalanced = rebalancer . rebalance ( ) ; <nl> + if ( rebalanced ) { <nl> + std : : this_thread : : sleep_for ( std : : chrono : : milliseconds ( 500 ) ) ; <nl> + } else { <nl> + std : : this_thread : : sleep_for ( std : : chrono : : microseconds ( 100 ) ) ; <nl> } <nl> - <nl> - auto d = store - > lookup ( tx , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> - TRI_ASSERT ( d . timestamp < = start ) ; / / transactionality <nl> } <nl> - bool ok = store - > commit ( tx ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> + } ; <nl> + auto rebalancerThread = new std : : thread ( rebalanceWorker ) ; <nl> + <nl> + / / initial fill <nl> + for ( uint64_t i = 1 ; i < = totalDocuments ; i + + ) { <nl> + store1 . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> + store2 . insert ( nullptr , TransactionalStore : : Document ( i ) ) ; <nl> } <nl> - } ; <nl> - <nl> - auto writeWorker = [ & store1 , & store2 , & storeBias , & writersDone , writerCount , <nl> - totalDocuments , writeBatchSize , & writeWaitInterval ] ( <nl> - uint64_t lower , uint64_t upper ) - > void { <nl> - uint64_t batches = ( upper + 1 - lower ) / writeBatchSize ; <nl> - uint64_t choice = lower ; <nl> - for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> - uint32_t r = RandomGenerator : : interval ( static_cast < uint32_t > ( 99 ) ) ; <nl> - TransactionalStore * store = ( r < = storeBias ) ? & store1 : & store2 ; <nl> - auto tx = store - > beginTransaction ( false ) ; <nl> - for ( uint64_t i = 0 ; i < writeBatchSize ; i + + ) { <nl> - auto d = store - > lookup ( tx , choice ) ; <nl> - TRI_ASSERT ( ! d . empty ( ) ) ; <nl> - d . advance ( ) ; <nl> - bool ok = store - > update ( tx , d ) ; <nl> + <nl> + auto readWorker = [ & store1 , & store2 , & storeBias , & writersDone , writerCount , <nl> + totalDocuments , readBatchSize ] ( ) - > void { <nl> + while ( writersDone . load ( ) < writerCount ) { <nl> + uint32_t r = RandomGenerator : : interval ( 99UL ) ; <nl> + TransactionalStore * store = ( r < = storeBias ) ? & store1 : & store2 ; <nl> + auto tx = store - > beginTransaction ( true ) ; <nl> + uint64_t start = static_cast < uint64_t > ( <nl> + std : : chrono : : steady_clock : : now ( ) . time_since_epoch ( ) . count ( ) ) ; <nl> + for ( uint64_t i = 0 ; i < readBatchSize ; i + + ) { <nl> + uint64_t choice = RandomGenerator : : interval ( totalDocuments ) ; <nl> + if ( choice = = 0 ) { <nl> + choice = 1 ; <nl> + } <nl> + <nl> + auto d = store - > lookup ( tx , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + TRI_ASSERT ( d . 
timestamp < = start ) ; / / transactionality <nl> + } <nl> + bool ok = store - > commit ( tx ) ; <nl> TRI_ASSERT ( ok ) ; <nl> - choice + + ; <nl> } <nl> - bool ok = store - > commit ( tx ) ; <nl> - TRI_ASSERT ( ok ) ; <nl> - std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> - } <nl> - writersDone + + ; <nl> - } ; <nl> + } ; <nl> + <nl> + auto writeWorker = [ & store1 , & store2 , & storeBias , & writersDone , writerCount , <nl> + totalDocuments , writeBatchSize , & writeWaitInterval ] ( <nl> + uint64_t lower , uint64_t upper ) - > void { <nl> + uint64_t batches = ( upper + 1 - lower ) / writeBatchSize ; <nl> + uint64_t choice = lower ; <nl> + for ( uint64_t batch = 0 ; batch < batches ; batch + + ) { <nl> + uint32_t r = RandomGenerator : : interval ( 99UL ) ; <nl> + TransactionalStore * store = ( r < = storeBias ) ? & store1 : & store2 ; <nl> + auto tx = store - > beginTransaction ( false ) ; <nl> + for ( uint64_t i = 0 ; i < writeBatchSize ; i + + ) { <nl> + auto d = store - > lookup ( tx , choice ) ; <nl> + TRI_ASSERT ( ! d . empty ( ) ) ; <nl> + d . advance ( ) ; <nl> + bool ok = store - > update ( tx , d ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + choice + + ; <nl> + } <nl> + bool ok = store - > commit ( tx ) ; <nl> + TRI_ASSERT ( ok ) ; <nl> + std : : this_thread : : sleep_for ( writeWaitInterval ) ; <nl> + } <nl> + writersDone + + ; <nl> + } ; <nl> <nl> - std : : vector < std : : thread * > threads ; <nl> + std : : vector < std : : thread * > threads ; <nl> <nl> - / / bias toward first store <nl> - storeBias = 80 ; <nl> + / / bias toward first store <nl> + storeBias = 80 ; <nl> <nl> - / / dispatch reader threads <nl> - for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> - threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> - } <nl> - / / dispatch writer threads <nl> - uint64_t chunkSize = totalDocuments / writerCount ; <nl> - for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> - uint64_t lower = ( i * chunkSize ) + 1 ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> - threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> - } <nl> + / / dispatch reader threads <nl> + for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> + } <nl> + / / dispatch writer threads <nl> + uint64_t chunkSize = totalDocuments / writerCount ; <nl> + for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> + uint64_t lower = ( i * chunkSize ) + 1 ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> + threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> + } <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - while ( store1 . cache ( ) - > isResizing ( ) | | store2 . cache ( ) - > isResizing ( ) ) { <nl> - std : : this_thread : : yield ( ) ; <nl> - } <nl> - / * BOOST_CHECK_MESSAGE ( 2 * store1 . cache ( ) - > limit ( ) > store2 . cache ( ) - > limit ( ) , <nl> - 2 * store1 . cache ( ) - > limit ( ) < < " ! > " <nl> - < < store2 . cache ( ) - > limit ( ) ) ; <nl> - * / <nl> - threads . clear ( ) ; <nl> - <nl> - / / bias toward second store <nl> - storeBias = 20 ; <nl> - <nl> - / / dispatch reader threads <nl> - for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> - threads . 
push_back ( new std : : thread ( readWorker ) ) ; <nl> - } <nl> - / / dispatch writer threads <nl> - for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> - uint64_t lower = ( i * chunkSize ) + 1 ; <nl> - uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> - threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> - } <nl> + while ( store1 . cache ( ) - > isResizing ( ) | | store2 . cache ( ) - > isResizing ( ) ) { <nl> + std : : this_thread : : yield ( ) ; <nl> + } <nl> + threads . clear ( ) ; <nl> <nl> - / / join threads <nl> - for ( auto t : threads ) { <nl> - t - > join ( ) ; <nl> - delete t ; <nl> - } <nl> + / / bias toward second store <nl> + storeBias = 20 ; <nl> <nl> - while ( store1 . cache ( ) - > isResizing ( ) | | store2 . cache ( ) - > isResizing ( ) ) { <nl> - std : : this_thread : : yield ( ) ; <nl> - } <nl> - / * BOOST_CHECK_MESSAGE ( store1 . cache ( ) - > limit ( ) < 2 * store2 . cache ( ) - > limit ( ) , <nl> - store1 . cache ( ) - > limit ( ) < < " ! < " <nl> - < < 2 * store2 . cache ( ) - > limit ( ) ) ; <nl> - * / <nl> - doneRebalancing = true ; <nl> - rebalancerThread - > join ( ) ; <nl> - delete rebalancerThread ; <nl> - <nl> - RandomGenerator : : shutdown ( ) ; <nl> - } <nl> + / / dispatch reader threads <nl> + for ( size_t i = 0 ; i < readerCount ; i + + ) { <nl> + threads . push_back ( new std : : thread ( readWorker ) ) ; <nl> + } <nl> + / / dispatch writer threads <nl> + for ( size_t i = 0 ; i < writerCount ; i + + ) { <nl> + uint64_t lower = ( i * chunkSize ) + 1 ; <nl> + uint64_t upper = ( ( i + 1 ) * chunkSize ) ; <nl> + threads . push_back ( new std : : thread ( writeWorker , lower , upper ) ) ; <nl> + } <nl> <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> - / / / @ brief generate tests <nl> - / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> + / / join threads <nl> + for ( auto t : threads ) { <nl> + t - > join ( ) ; <nl> + delete t ; <nl> + } <nl> <nl> - } <nl> + while ( store1 . cache ( ) - > isResizing ( ) | | store2 . cache ( ) - > isResizing ( ) ) { <nl> + std : : this_thread : : yield ( ) ; <nl> + } <nl> + doneRebalancing = true ; <nl> + rebalancerThread - > join ( ) ; <nl> + delete rebalancerThread ; <nl> <nl> - / / Local Variables : <nl> - / / mode : outline - minor <nl> - / / outline - regexp : " ^ \ \ ( / / / @ brief \ \ | / / / { @ inheritDoc } \ \ | / / / @ addtogroup \ \ | / / <nl> - / / - - SECTION - - \ \ | / / / @ \ \ } \ \ ) " <nl> - / / End : <nl> + RandomGenerator : : shutdown ( ) ; <nl> + } <nl> + } <nl>
Improved cache rebalancing and allocation strategies .
arangodb/arangodb
6a449efb06dc680c667e127bc4de85b9ccee2ab4
2017-03-11T17:15:08Z
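The arangodb diff above rewrites the transactional-cache tests around one invariant: once a key is blacklisted, find() misses and insert() refuses it until the transaction generation ends via endTransaction(). A minimal standalone sketch of that semantic, assuming nothing from arangodb (TinyTxCache is an illustrative toy, not the real Cache API):

#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <unordered_set>

class TinyTxCache {
 public:
  bool insert(uint64_t key, uint64_t value) {
    if (blacklist_.count(key) > 0) {
      return false;  // key is blacklisted for the current transaction
    }
    map_[key] = value;
    return true;
  }

  bool find(uint64_t key, uint64_t& value) const {
    auto it = map_.find(key);
    if (it == map_.end()) {
      return false;
    }
    value = it->second;
    return true;
  }

  bool blacklist(uint64_t key) {
    map_.erase(key);         // drop any cached copy
    blacklist_.insert(key);  // refuse re-insertion until endTransaction()
    return true;
  }

  void endTransaction() { blacklist_.clear(); }

 private:
  std::unordered_map<uint64_t, uint64_t> map_;
  std::unordered_set<uint64_t> blacklist_;
};

int main() {
  TinyTxCache cache;
  cache.insert(7, 7);
  cache.blacklist(7);
  uint64_t v;
  std::cout << std::boolalpha
            << cache.insert(7, 7) << ' '    // false: still blacklisted
            << cache.find(7, v) << '\n';    // false: not found
  cache.endTransaction();                   // new transaction generation
  std::cout << cache.insert(7, 7) << '\n';  // true: insert allowed again
}

The real cache also enforces memory limits and concurrency, which this sketch deliberately omits; it only mirrors the blacklist/endTransaction lifecycle the SECTIONs exercise.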
mmm a / externals / cmake - modules / GetGitRevisionDescription . cmake . in <nl> ppp b / externals / cmake - modules / GetGitRevisionDescription . cmake . in <nl> else ( ) <nl> endif ( ) <nl> <nl> if ( NOT HEAD_HASH ) <nl> - file ( READ " @ GIT_DATA @ / head - ref " HEAD_HASH LIMIT 1024 ) <nl> - string ( STRIP " $ { HEAD_HASH } " HEAD_HASH ) <nl> + if ( EXISTS " @ GIT_DATA @ / head - ref " ) <nl> + file ( READ " @ GIT_DATA @ / head - ref " HEAD_HASH LIMIT 1024 ) <nl> + string ( STRIP " $ { HEAD_HASH } " HEAD_HASH ) <nl> + else ( ) <nl> + set ( HEAD_HASH " Unknown " ) <nl> + endif ( ) <nl> endif ( ) <nl>
Merge pull request from janisozaur / patch - 12
yuzu-emu/yuzu
997c3dc6ff1eddecaca5b9d79d834631145cb1f4
2018-11-19T03:53:34Z
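The yuzu commit above guards a CMake file(READ) against a missing "head-ref" file, falling back to HEAD_HASH "Unknown" instead of failing. The same defensive read-if-exists pattern in standalone C++17 (the path in main is a placeholder for illustration, not yuzu's actual layout):

#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

std::string readHeadHash(const std::filesystem::path& headRef) {
  if (!std::filesystem::exists(headRef)) {
    return "Unknown";  // mirrors CMake's set(HEAD_HASH "Unknown") fallback
  }
  std::ifstream in(headRef);
  std::string hash;
  std::getline(in, hash);  // first line only, analogous to LIMIT 1024 + STRIP
  return hash.empty() ? "Unknown" : hash;
}

int main() {
  std::cout << readHeadHash(".git-data/head-ref") << '\n';
}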
mmm a / tensorflow / core / platform / default / build_config . bzl <nl> ppp b / tensorflow / core / platform / default / build_config . bzl <nl> def tf_additional_numa_copts ( ) : <nl> " - DTENSORFLOW_USE_NUMA " , <nl> ] , <nl> } ) <nl> + <nl> + def tf_additional_rpc_deps ( ) : <nl> + return [ ] <nl> mmm a / tensorflow / python / distribute / cluster_resolver / BUILD <nl> ppp b / tensorflow / python / distribute / cluster_resolver / BUILD <nl> <nl> # Description : Operations defined for Cluster Resolvers <nl> <nl> load ( " / / tensorflow : tensorflow . bzl " , " tf_py_test " ) <nl> + load ( <nl> + " / / tensorflow / core : platform / default / build_config . bzl " , <nl> + " tf_additional_rpc_deps " , <nl> + ) <nl> <nl> package ( <nl> default_visibility = [ <nl> py_library ( <nl> deps = [ <nl> " : base_cluster_resolver_py " , <nl> " / / tensorflow / python : training_server_lib " , <nl> - ] , <nl> + ] + tf_additional_rpc_deps ( ) , <nl> ) <nl> <nl> py_library ( <nl>
A noop change .
tensorflow/tensorflow
9e4d0407682e08e8ec53f65a29367406fe831510
2019-08-01T21:56:18Z
mmm a / hphp / hack / src / client / clientRage . ml <nl> ppp b / hphp / hack / src / client / clientRage . ml <nl> let rage_www ( env : env ) : ( ( string * string ) option * string ) Lwt . t = <nl> ( match clowder_result with <nl> | Error failure - > <nl> Lwt . return <nl> - ( Some ( " www_hgdiff . txt " , stdout ) , <nl> + ( Some ( " www_hgdiff . txt " , hgdiff ) , <nl> Printf . sprintf <nl> " hg patch - - no - commit www_hgdiff . txt \ n \ nnote : clowder failed to put : \ n % s " <nl> failure ) <nl> | Ok clowder_script - > <nl> Lwt . return <nl> - ( Some ( " www_hgdiff . txt " , stdout ) , <nl> + ( Some ( " www_hgdiff . txt " , hgdiff ) , <nl> clowder_script ^ " | hg patch - - no - commit - " ) ) <nl> in <nl> + let % lwt hg_st_result = <nl> + Lwt_utils . exec_checked <nl> + ? env : hgplain_env <nl> + Exec_command . Hg <nl> + ~ timeout : 30 . 0 <nl> + [ | " status " ; " - - cwd " ; Path . to_string env . root | ] <nl> + in <nl> + let hg_st = <nl> + match hg_st_result with <nl> + | Error failure - > format_failure " Unable to ` hg status ` " failure <nl> + | Ok { Lwt_utils . Process_success . stdout ; _ } - > " hg status : \ n " ^ stdout <nl> + in <nl> Lwt . return <nl> ( patch_item , <nl> - Printf . sprintf " hg update - C % s \ n \ n % s \ n " mergebase patch_instructions ) <nl> + Printf . sprintf <nl> + " hg update - C % s \ n \ n % s \ n \ n \ n % s " <nl> + mergebase <nl> + patch_instructions <nl> + hg_st ) <nl> <nl> let rage_www_errors ( env : env ) : string Lwt . t = <nl> let % lwt www_errors_result = <nl>
better hg diff , status
facebook/hhvm
693c705b903c8c7e979011a97ebd8a6effa922d6
2020-05-14T18:07:21Z
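The hhvm change above shells out to `hg status` and folds either its output or a formatted failure note into the rage report. A rough C++ analogue of that capture-or-report pattern using POSIX popen (unlike the Lwt_utils.exec_checked ~timeout:30.0 call in the diff, this sketch has no timeout, and the root argument is passed unescaped for brevity):

#include <array>
#include <cstdio>
#include <iostream>
#include <string>

std::string hgStatusSection(const std::string& root) {
  std::string cmd = "hg status --cwd " + root + " 2>&1";
  FILE* pipe = popen(cmd.c_str(), "r");
  if (pipe == nullptr) {
    return "Unable to `hg status`: popen failed\n";
  }
  std::string out;
  std::array<char, 4096> buf;
  while (fgets(buf.data(), static_cast<int>(buf.size()), pipe) != nullptr) {
    out += buf.data();
  }
  int rc = pclose(pipe);  // raw wait status; nonzero means the command failed
  if (rc != 0) {
    return "Unable to `hg status` (status " + std::to_string(rc) + "):\n" + out;
  }
  return "hg status:\n" + out;
}

int main() {
  std::cout << hgStatusSection(".");
}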
mmm a / db / btree . cpp <nl> ppp b / db / btree . cpp <nl> DiskLoc BtreeBucket : : advance ( const DiskLoc & thisLoc , int & keyOfs , int direction , <nl> int ko = keyOfs + direction ; <nl> DiskLoc nextDown = childForPos ( ko + adj ) ; <nl> if ( ! nextDown . isNull ( ) ) { <nl> - / / nextDown . btree ( ) - > dump ( ) ; / / TEMP : <nl> while ( 1 ) { <nl> keyOfs = direction > 0 ? 0 : nextDown . btree ( ) - > n - 1 ; <nl> DiskLoc loc = nextDown . btree ( ) - > childForPos ( keyOfs + adj ) ; <nl> mmm a / db / clientcursor . cpp <nl> ppp b / db / clientcursor . cpp <nl> void aboutToDelete ( const DiskLoc & dl ) { <nl> for ( vector < ClientCursor * > : : iterator i = toAdvance . begin ( ) ; <nl> i ! = toAdvance . end ( ) ; + + i ) <nl> { <nl> - ( * i ) - > c - > checkLocation ( ) ; <nl> - ( * i ) - > c - > advance ( ) ; <nl> - wassert ( ( * i ) - > c - > currLoc ( ) ! = dl ) ; <nl> - ( * i ) - > updateLocation ( ) ; <nl> + Cursor * c = ( * i ) - > c . get ( ) ; <nl> + DiskLoc tmp1 = c - > currLoc ( ) ; <nl> + if ( tmp1 ! = dl ) { <nl> + / * this might indicate a failure to call ClientCursor : : updateLocation ( ) * / <nl> + problem ( ) < < " warning : cursor loc does not match byLoc position ! " < < endl ; <nl> + } <nl> + c - > checkLocation ( ) ; <nl> + if ( c - > tailing ( ) ) { <nl> + DEV cout < < " killing cursor as we would have to advance it and it is tailable " < < endl ; <nl> + delete * i ; <nl> + continue ; <nl> + } <nl> + c - > advance ( ) ; <nl> + DiskLoc newLoc = c - > currLoc ( ) ; <nl> + if ( newLoc . isNull ( ) ) { <nl> + / / advanced to end - - delete cursor <nl> + delete * i ; <nl> + } <nl> + else { <nl> + wassert ( newLoc ! = dl ) ; <nl> + ( * i ) - > updateLocation ( ) ; <nl> + } <nl> } <nl> } <nl> <nl> void ClientCursor : : updateLocation ( ) { <nl> assert ( cursorid ) ; <nl> DiskLoc cl = c - > currLoc ( ) ; <nl> if ( lastLoc ( ) = = cl ) { <nl> - log ( ) < < " info : lastloc = = curloc " < < ns < < ' \ n ' ; <nl> + / / log ( ) < < " info : lastloc = = curloc " < < ns < < ' \ n ' ; <nl> return ; <nl> } <nl> setLastLoc ( cl ) ; <nl> new file mode 100644 <nl> index 000000000000 . . ec063d56e501 <nl> mmm / dev / null <nl> ppp b / db / cursor . h <nl> <nl> + / * * <nl> + * Copyright ( C ) 2008 10gen Inc . <nl> + * <nl> + * This program is free software : you can redistribute it and / or modify <nl> + * it under the terms of the GNU Affero General Public License , version 3 , <nl> + * as published by the Free Software Foundation . <nl> + * <nl> + * This program is distributed in the hope that it will be useful , <nl> + * but WITHOUT ANY WARRANTY ; without even the implied warranty of <nl> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the <nl> + * GNU Affero General Public License for more details . <nl> + * <nl> + * You should have received a copy of the GNU Affero General Public License <nl> + * along with this program . If not , see < http : / / www . gnu . org / licenses / > . <nl> + * / <nl> + <nl> + # pragma once <nl> + <nl> + # include " . . / stdafx . h " <nl> + <nl> + / * Query cursors , base class . This is for our internal cursors . " ClientCursor " is a separate <nl> + concept and is for the user ' s cursor . <nl> + * / <nl> + class Cursor { <nl> + public : <nl> + virtual bool ok ( ) = 0 ; <nl> + bool eof ( ) { return ! 
ok ( ) ; } <nl> + virtual Record * _current ( ) = 0 ; <nl> + virtual JSObj current ( ) = 0 ; <nl> + virtual DiskLoc currLoc ( ) = 0 ; <nl> + virtual bool advance ( ) = 0 ; / * true = ok * / <nl> + <nl> + / * Implement these if you want the cursor to be " tailable " * / <nl> + / * tailable ( ) : if true , cursor has tailable capability AND <nl> + the user requested use of those semantics . * / <nl> + virtual bool tailable ( ) { return false ; } <nl> + / * indicates we should mark where we are and go into tail mode . * / <nl> + virtual void setAtTail ( ) { assert ( false ) ; } <nl> + / * you must call tailResume before reusing the cursor * / <nl> + virtual void tailResume ( ) { } <nl> + / * indicates ifi we are actively tailing . once it goes active , <nl> + this should return treu even after tailResume ( ) . * / <nl> + virtual bool tailing ( ) { return false ; } <nl> + <nl> + virtual void aboutToDeleteBucket ( const DiskLoc & b ) { } <nl> + <nl> + / * optional to implement . if implemented , means ' this ' is a prototype * / <nl> + virtual Cursor * clone ( ) { return 0 ; } <nl> + <nl> + virtual bool tempStopOnMiss ( ) { return false ; } <nl> + <nl> + / * called after every query block is iterated - - i . e . between getMore ( ) blocks <nl> + so you can note where we are , if necessary . <nl> + * / <nl> + virtual void noteLocation ( ) { } <nl> + <nl> + / * called before query getmore block is iterated * / <nl> + virtual void checkLocation ( ) { } <nl> + <nl> + virtual const char * toString ( ) { return " abstract ? " ; } <nl> + <nl> + / * used for multikey index traversal to avoid sending back dups . see JSMatcher : : matches ( ) * / <nl> + set < DiskLoc > dups ; <nl> + bool getsetdup ( DiskLoc loc ) { <nl> + / * to save mem only call this when there is risk of dups ( e . g . when ' deep ' / multikey ) * / <nl> + if ( dups . count ( loc ) > 0 ) <nl> + return true ; <nl> + dups . insert ( loc ) ; <nl> + return false ; <nl> + } <nl> + } ; <nl> + <nl> + / * table - scan style cursor * / <nl> + class BasicCursor : public Cursor { <nl> + protected : <nl> + DiskLoc curr , last ; <nl> + <nl> + private : <nl> + / / for tailing : <nl> + enum State { Normal , TailPoint , TailResumed } state ; <nl> + void init ( ) { state = Normal ; } <nl> + <nl> + public : <nl> + bool ok ( ) { return ! curr . isNull ( ) ; } <nl> + Record * _current ( ) { <nl> + assert ( ok ( ) ) ; <nl> + return curr . rec ( ) ; <nl> + } <nl> + JSObj current ( ) { <nl> + Record * r = _current ( ) ; <nl> + JSObj j ( r ) ; <nl> + return j ; <nl> + } <nl> + virtual DiskLoc currLoc ( ) { return curr ; } <nl> + <nl> + bool advance ( ) { <nl> + if ( eof ( ) ) <nl> + return false ; <nl> + Record * r = _current ( ) ; <nl> + last = curr ; <nl> + curr = r - > getNext ( curr ) ; <nl> + return ok ( ) ; <nl> + } <nl> + <nl> + BasicCursor ( DiskLoc dl ) : curr ( dl ) { init ( ) ; } <nl> + BasicCursor ( ) { init ( ) ; } <nl> + virtual const char * toString ( ) { return " BasicCursor " ; } <nl> + <nl> + virtual void tailResume ( ) { <nl> + if ( state = = TailPoint ) { <nl> + state = TailResumed ; <nl> + advance ( ) ; <nl> + } <nl> + } <nl> + virtual void setAtTail ( ) { <nl> + assert ( state ! = TailPoint ) ; <nl> + assert ( curr . isNull ( ) ) ; <nl> + assert ( ! last . isNull ( ) ) ; <nl> + curr = last ; last . Null ( ) ; <nl> + state = TailPoint ; <nl> + } <nl> + virtual bool tailable ( ) { <nl> + / / to go into tail mode we need a non - null point of reference for resumption <nl> + return ! last . 
isNull ( ) ; <nl> + } <nl> + virtual bool tailing ( ) { <nl> + return state ! = Normal ; <nl> + } <nl> + } ; <nl> + <nl> + / * used for order { $ natural : - 1 } * / <nl> + class ReverseCursor : public BasicCursor { <nl> + public : <nl> + bool advance ( ) { <nl> + if ( eof ( ) ) <nl> + return false ; <nl> + Record * r = _current ( ) ; <nl> + last = curr ; <nl> + curr = r - > getPrev ( curr ) ; <nl> + return ok ( ) ; <nl> + } <nl> + <nl> + ReverseCursor ( DiskLoc dl ) : BasicCursor ( dl ) { } <nl> + ReverseCursor ( ) { } <nl> + virtual const char * toString ( ) { return " ReverseCursor " ; } <nl> + } ; <nl> + <nl> mmm a / db / db . cpp <nl> ppp b / db / db . cpp <nl> void receivedQuery ( DbResponse & dbresponse , / * AbstractMessagingPort & dbMsgPort , * <nl> QueryResult * msgdata ; <nl> <nl> try { <nl> - msgdata = runQuery ( m , ns , ntoskip , ntoreturn , query , fields , ss ) ; <nl> + msgdata = runQuery ( m , ns , ntoskip , ntoreturn , query , fields , ss , m . data - > dataAsInt ( ) ) ; <nl> } <nl> catch ( AssertionException ) { <nl> ss < < " exception " ; <nl> mmm a / db / db . vcproj <nl> ppp b / db / db . vcproj <nl> <nl> RelativePath = " . \ clientcursor . h " <nl> > <nl> < / File > <nl> + < File <nl> + RelativePath = " . \ cursor . h " <nl> + > <nl> + < / File > <nl> < File <nl> RelativePath = " . \ db . h " <nl> > <nl> mmm a / db / dbclient . cpp <nl> ppp b / db / dbclient . cpp <nl> auto_ptr < DBClientCursor > DBClientConnection : : query ( const char * ns , JSObj query , <nl> / * - - DBClientCursor mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm - * / <nl> <nl> void DBClientCursor : : requestMore ( ) { <nl> - cout < < " TEMP REQUESTMORE " < < endl ; <nl> - <nl> assert ( cursorId & & pos = = nReturned ) ; <nl> <nl> BufBuilder b ; <nl> cout < < " TEMP REQUESTMORE " < < endl ; <nl> <nl> void DBClientCursor : : dataReceived ( ) { <nl> QueryResult * qr = ( QueryResult * ) m - > data ; <nl> - if ( qr - > resultOptions ( ) & ResultOption_CursorNotFound ) { <nl> + if ( qr - > resultFlags ( ) & ResultFlag_CursorNotFound ) { <nl> / / cursor id no longer valid at the server . <nl> assert ( qr - > cursorId = = 0 ) ; <nl> cursorId = 0 ; / / 0 indicates no longer valid ( dead ) <nl> void DBClientCursor : : dataReceived ( ) { <nl> nReturned = qr - > nReturned ; <nl> pos = 0 ; <nl> data = qr - > data ( ) ; <nl> - assert ( nReturned | | cursorId = = 0 ) ; <nl> + / * this assert would fire the way we currently work : <nl> + assert ( nReturned | | cursorId = = 0 ) ; <nl> + * / <nl> } <nl> <nl> bool DBClientCursor : : more ( ) { <nl> mmm a / db / dbclient . h <nl> ppp b / db / dbclient . h <nl> enum { <nl> <nl> like any " latent cursor " , the cursor may become invalid at some point - - for example if that <nl> final object it references were deleted . Thus , you should be prepared to requery if you get back <nl> - ResultOption_CursorNotFound . <nl> + ResultFlag_CursorNotFound . <nl> * / <nl> Option_CursorTailable = 2 <nl> } ; <nl> struct QueryResult : public MsgData { <nl> int startingFrom ; <nl> int nReturned ; <nl> const char * data ( ) { return ( char * ) ( ( ( int * ) & nReturned ) + 1 ) ; } <nl> - int resultOptions ( ) const { <nl> - return * ( ( int * ) _data ) ; <nl> - } <nl> + int & resultFlags ( ) { return dataAsInt ( ) ; } <nl> } ; <nl> # pragma pack ( pop ) <nl> <nl> mmm a / db / pdfile . h <nl> ppp b / db / pdfile . 
h <nl> inline Extent * PhysicalDataFile : : getExtent ( DiskLoc loc ) { <nl> return e ; <nl> } <nl> <nl> - class Cursor { <nl> - public : <nl> - virtual bool ok ( ) = 0 ; <nl> - bool eof ( ) { return ! ok ( ) ; } <nl> - virtual Record * _current ( ) = 0 ; <nl> - virtual JSObj current ( ) = 0 ; <nl> - virtual DiskLoc currLoc ( ) = 0 ; <nl> - virtual bool advance ( ) = 0 ; / * true = ok * / <nl> - <nl> - virtual void aboutToDeleteBucket ( const DiskLoc & b ) { } <nl> - <nl> - / * optional to implement . if implemented , means ' this ' is a prototype * / <nl> - virtual Cursor * clone ( ) { return 0 ; } <nl> - <nl> - virtual bool tempStopOnMiss ( ) { return false ; } <nl> - <nl> - / * called after every query block is iterated - - i . e . between getMore ( ) blocks <nl> - so you can note where we are , if necessary . <nl> - * / <nl> - virtual void noteLocation ( ) { } <nl> - <nl> - / * called before query getmore block is iterated * / <nl> - virtual void checkLocation ( ) { } <nl> - <nl> - virtual const char * toString ( ) { return " abstract ? " ; } <nl> - <nl> - / * used for multikey index traversal to avoid sending back dups . see JSMatcher : : matches ( ) * / <nl> - set < DiskLoc > dups ; <nl> - bool getsetdup ( DiskLoc loc ) { <nl> - / * to save mem only call this when there is risk of dups ( e . g . when ' deep ' / multikey ) * / <nl> - if ( dups . count ( loc ) > 0 ) <nl> - return true ; <nl> - dups . insert ( loc ) ; <nl> - return false ; <nl> - } <nl> - } ; <nl> - <nl> - class BasicCursor : public Cursor { <nl> - public : <nl> - bool ok ( ) { return ! curr . isNull ( ) ; } <nl> - Record * _current ( ) { <nl> - assert ( ok ( ) ) ; <nl> - return curr . rec ( ) ; <nl> - } <nl> - JSObj current ( ) { <nl> - Record * r = _current ( ) ; <nl> - JSObj j ( r ) ; <nl> - return j ; <nl> - } <nl> - virtual DiskLoc currLoc ( ) { return curr ; } <nl> - <nl> - bool advance ( ) { <nl> - if ( eof ( ) ) <nl> - return false ; <nl> - Record * r = _current ( ) ; <nl> - curr = r - > getNext ( curr ) ; <nl> - return ok ( ) ; <nl> - } <nl> - <nl> - BasicCursor ( DiskLoc dl ) : curr ( dl ) { } <nl> - BasicCursor ( ) { } <nl> - virtual const char * toString ( ) { return " BasicCursor " ; } <nl> - <nl> - DiskLoc curr ; <nl> - } ; <nl> - <nl> - / * used for order { $ natural : - 1 } * / <nl> - class ReverseCursor : public BasicCursor { <nl> - public : <nl> - bool advance ( ) { <nl> - if ( eof ( ) ) <nl> - return false ; <nl> - Record * r = _current ( ) ; <nl> - curr = r - > getPrev ( curr ) ; <nl> - return ok ( ) ; <nl> - } <nl> - <nl> - ReverseCursor ( DiskLoc dl ) : BasicCursor ( dl ) { } <nl> - ReverseCursor ( ) { } <nl> - virtual const char * toString ( ) { return " ReverseCursor " ; } <nl> - } ; <nl> + # include " cursor . h " <nl> <nl> inline Record * PhysicalDataFile : : recordAt ( DiskLoc dl ) { return header - > getRecord ( dl ) ; } <nl> <nl> mmm a / db / query . cpp <nl> ppp b / db / query . cpp <nl> int deleteObjects ( const char * ns , JSObj pattern , bool justOne , bool god ) { <nl> c = theDataFileMgr . findAll ( ns ) ; <nl> <nl> Cursor & tempDebug = * c ; <nl> - int temp = 0 ; <nl> - int tempd = 0 ; <nl> - <nl> - DiskLoc _tempDelLoc ; <nl> <nl> while ( c - > ok ( ) ) { <nl> - temp + + ; <nl> - <nl> Record * r = c - > _current ( ) ; <nl> DiskLoc rloc = c - > currLoc ( ) ; <nl> c - > advance ( ) ; / / must advance before deleting as the next ptr will die <nl> int deleteObjects ( const char * ns , JSObj pattern , bool justOne , bool god ) { <nl> assert ( ! deep | | ! 
c - > getsetdup ( rloc ) ) ; / / can ' t be a dup , we deleted it ! <nl> if ( ! justOne ) <nl> c - > noteLocation ( ) ; <nl> - _tempDelLoc = rloc ; <nl> <nl> theDataFileMgr . deleteRecord ( ns , r , rloc ) ; <nl> nDeleted + + ; <nl> - tempd = temp ; <nl> if ( justOne ) <nl> break ; <nl> c - > checkLocation ( ) ; <nl> int runCount ( const char * ns , JSObj & cmd , string & err ) { <nl> } <nl> <nl> QueryResult * runQuery ( Message & message , const char * ns , int ntoskip , int _ntoreturn , JSObj jsobj , <nl> - auto_ptr < set < string > > filter , stringstream & ss ) <nl> + auto_ptr < set < string > > filter , stringstream & ss , int queryOptions ) <nl> { <nl> time_t t = time ( 0 ) ; <nl> bool wantMore = true ; <nl> QueryResult * runQuery ( Message & message , const char * ns , int ntoskip , int _ntoret <nl> <nl> int nscanned = 0 ; <nl> auto_ptr < Cursor > c = getSpecialCursor ( ns ) ; <nl> + if ( c . get ( ) = = 0 ) <nl> + c = getIndexCursor ( ns , query , order ) ; <nl> + if ( c . get ( ) = = 0 ) <nl> + c = findTableScan ( ns , order ) ; <nl> <nl> - / * try * / { <nl> + while ( c - > ok ( ) ) { <nl> + JSObj js = c - > current ( ) ; <nl> + if ( queryTraceLevel > = 50 ) <nl> + cout < < " checking against : \ n " < < js . toString ( ) < < endl ; <nl> + nscanned + + ; <nl> + bool deep ; <nl> <nl> - if ( c . get ( ) = = 0 ) { <nl> - c = getIndexCursor ( ns , query , order ) ; <nl> - } <nl> - if ( c . get ( ) = = 0 ) { <nl> - / / c = theDataFileMgr . findAll ( ns ) ; <nl> - c = findTableScan ( ns , order ) ; <nl> + if ( ! matcher - > matches ( js , & deep ) ) { <nl> + if ( c - > tempStopOnMiss ( ) ) <nl> + break ; <nl> } <nl> - <nl> - while ( c - > ok ( ) ) { <nl> - JSObj js = c - > current ( ) ; <nl> - if ( queryTraceLevel > = 50 ) <nl> - cout < < " checking against : \ n " < < js . toString ( ) < < endl ; <nl> - nscanned + + ; <nl> - bool deep ; <nl> - <nl> - JSMatcher & debug = * matcher ; <nl> - assert ( debug . getN ( ) < 5000 ) ; <nl> - <nl> - if ( ! matcher - > matches ( js , & deep ) ) { <nl> - if ( c - > tempStopOnMiss ( ) ) <nl> - break ; <nl> + else if ( ! deep | | ! c - > getsetdup ( c - > currLoc ( ) ) ) { / / i . e . , check for dups on deep items only <nl> + / / got a match . <nl> + if ( ntoskip > 0 ) { <nl> + ntoskip - - ; <nl> } <nl> - else if ( ! deep | | ! c - > getsetdup ( c - > currLoc ( ) ) ) { / / i . e . , check for dups on deep items only <nl> - / / got a match . <nl> - if ( ntoskip > 0 ) { <nl> - ntoskip - - ; <nl> + else { <nl> + bool ok = true ; <nl> + assert ( js . objsize ( ) > = 0 ) ; / / defensive for segfaults <nl> + if ( filter . get ( ) ) { <nl> + / / we just want certain fields from the object . <nl> + JSObj x ; <nl> + ok = x . addFields ( js , * filter ) > 0 ; <nl> + if ( ok ) <nl> + b . append ( ( void * ) x . objdata ( ) , x . objsize ( ) ) ; <nl> } <nl> else { <nl> - bool ok = true ; <nl> - assert ( js . objsize ( ) > = 0 ) ; / / defensive for segfaults <nl> - if ( filter . get ( ) ) { <nl> - / / we just want certain fields from the object . <nl> - JSObj x ; <nl> - ok = x . addFields ( js , * filter ) > 0 ; <nl> - if ( ok ) <nl> - b . append ( ( void * ) x . objdata ( ) , x . objsize ( ) ) ; <nl> - } <nl> - else { <nl> - b . append ( ( void * ) js . objdata ( ) , js . objsize ( ) ) ; <nl> - } <nl> - if ( ok ) { <nl> - n + + ; <nl> - if ( ( ntoreturn > 0 & & ( n > = ntoreturn | | b . len ( ) > MaxBytesToReturnToClientAtOnce ) ) | | <nl> - ( ntoreturn = = 0 & & ( b . 
len ( ) > 1 * 1024 * 1024 | | n > = 101 ) ) ) { <nl> - / * if ntoreturn is zero , we return up to 101 objects . on the subsequent getmore , there <nl> - is only a size limit . The idea is that on a find ( ) where one doesn ' t use much results , <nl> - we don ' t return much , but once getmore kicks in , we start pushing significant quantities . <nl> - <nl> - The n limit ( vs . size ) is important when someone fetches only one small field from big <nl> - objects , which causes massive scanning server - side . <nl> - * / <nl> - / * if only 1 requested , no cursor saved for efficiency . . . we assume it is findOne ( ) * / <nl> - if ( wantMore & & ntoreturn ! = 1 ) { <nl> + b . append ( ( void * ) js . objdata ( ) , js . objsize ( ) ) ; <nl> + } <nl> + if ( ok ) { <nl> + n + + ; <nl> + if ( ( ntoreturn > 0 & & ( n > = ntoreturn | | b . len ( ) > MaxBytesToReturnToClientAtOnce ) ) | | <nl> + ( ntoreturn = = 0 & & ( b . len ( ) > 1 * 1024 * 1024 | | n > = 101 ) ) ) { <nl> + / * if ntoreturn is zero , we return up to 101 objects . on the subsequent getmore , there <nl> + is only a size limit . The idea is that on a find ( ) where one doesn ' t use much results , <nl> + we don ' t return much , but once getmore kicks in , we start pushing significant quantities . <nl> + <nl> + The n limit ( vs . size ) is important when someone fetches only one small field from big <nl> + objects , which causes massive scanning server - side . <nl> + * / <nl> + / * if only 1 requested , no cursor saved for efficiency . . . we assume it is findOne ( ) * / <nl> + if ( wantMore & & ntoreturn ! = 1 ) { <nl> + if ( useCursors ) { <nl> c - > advance ( ) ; <nl> - if ( c - > ok ( ) & & useCursors ) { <nl> + if ( c - > ok ( ) ) { <nl> / / more . . . so save a cursor <nl> ClientCursor * cc = new ClientCursor ( ) ; <nl> cc - > c = c ; <nl> assert ( debug . getN ( ) < 5000 ) ; <nl> cc - > updateLocation ( ) ; <nl> } <nl> } <nl> - break ; <nl> - } <nl> + } <nl> + break ; <nl> } <nl> } <nl> } <nl> - c - > advance ( ) ; <nl> } <nl> + c - > advance ( ) ; <nl> + } <nl> <nl> - if ( client - > profile ) <nl> - ss < < " nscanned : " < < nscanned < < ' ' ; <nl> + if ( cursorid = = 0 & & ( queryOptions & Option_CursorTailable ) & & c - > tailable ( ) ) { <nl> + c - > setAtTail ( ) ; <nl> + ClientCursor * cc = new ClientCursor ( ) ; <nl> + cc - > c = c ; <nl> + cursorid = cc - > cursorid ; <nl> + DEV cout < < " query has no more but tailable , cursorid : " < < cursorid < < endl ; <nl> + cc - > matcher = matcher ; <nl> + cc - > ns = ns ; <nl> + cc - > pos = n ; <nl> + cc - > filter = filter ; <nl> + cc - > originalMessage = message ; <nl> + cc - > updateLocation ( ) ; <nl> } <nl> + <nl> + if ( client - > profile ) <nl> + ss < < " nscanned : " < < nscanned < < ' ' ; <nl> } <nl> <nl> QueryResult * qr = ( QueryResult * ) b . buf ( ) ; <nl> QueryResult * getMore ( const char * ns , int ntoreturn , long long cursorid ) { <nl> <nl> b . skip ( sizeof ( QueryResult ) ) ; <nl> <nl> + int resultFlags = 0 ; <nl> int start = 0 ; <nl> int n = 0 ; <nl> <nl> if ( ! cc ) { <nl> DEV log ( ) < < " getMore : cursorid not found " < < ns < < " " < < cursorid < < endl ; <nl> cursorid = 0 ; <nl> + resultFlags = ResultFlag_CursorNotFound ; <nl> } <nl> else { <nl> start = cc - > pos ; <nl> Cursor * c = cc - > c . get ( ) ; <nl> c - > checkLocation ( ) ; <nl> + c - > tailResume ( ) ; <nl> while ( 1 ) { <nl> if ( ! c - > ok ( ) ) { <nl> done : <nl> - / / done ! kill cursor . 
<nl> + if ( c - > tailing ( ) ) { <nl> + c - > setAtTail ( ) ; <nl> + break ; <nl> + } <nl> DEV log ( ) < < " getmore : last batch , erasing cursor " < < cursorid < < endl ; <nl> bool ok = ClientCursor : : erase ( cursorid ) ; <nl> assert ( ok ) ; <nl> QueryResult * getMore ( const char * ns , int ntoreturn , long long cursorid ) { <nl> if ( ( ntoreturn > 0 & & ( n > = ntoreturn | | b . len ( ) > MaxBytesToReturnToClientAtOnce ) ) | | <nl> ( ntoreturn = = 0 & & b . len ( ) > 1 * 1024 * 1024 ) ) { <nl> c - > advance ( ) ; <nl> + if ( c - > tailing ( ) & & ! c - > ok ( ) ) <nl> + c - > setAtTail ( ) ; <nl> cc - > pos + = n ; <nl> - cc - > updateLocation ( ) ; <nl> + / / cc - > updateLocation ( ) ; <nl> break ; <nl> } <nl> } <nl> QueryResult * getMore ( const char * ns , int ntoreturn , long long cursorid ) { <nl> } <nl> c - > advance ( ) ; <nl> } <nl> + cc - > updateLocation ( ) ; <nl> } <nl> <nl> QueryResult * qr = ( QueryResult * ) b . buf ( ) ; <nl> - qr - > cursorId = cursorid ; <nl> - qr - > startingFrom = start ; <nl> qr - > len = b . len ( ) ; <nl> - / / qr - > reserved = 0 ; <nl> qr - > operation = opReply ; <nl> + qr - > resultFlags ( ) = resultFlags ; <nl> + qr - > cursorId = cursorid ; <nl> + qr - > startingFrom = start ; <nl> qr - > nReturned = n ; <nl> b . decouple ( ) ; <nl> <nl> mmm a / db / query . h <nl> ppp b / db / query . h <nl> <nl> / * db response format <nl> <nl> Query or GetMore : / / see struct QueryResult <nl> - int resultOptions = 0 ; <nl> + int resultFlags = 0 ; <nl> int64 cursorID ; <nl> int startingFrom ; <nl> int nReturned ; / / 0 = infinity <nl> list of marshalled JSObjects ; <nl> * / <nl> <nl> - / * the field ' resultOptions ' above * / <nl> + / * the field ' resultFlags ' above * / <nl> enum { <nl> / * returned , with zero results , when getMore is called but the cursor id is not valid at the server . * / <nl> - ResultOption_CursorNotFound = 1 <nl> + ResultFlag_CursorNotFound = 1 <nl> } ; <nl> <nl> / / grab struct QueryResult from : <nl> QueryResult * getMore ( const char * ns , int ntoreturn , long long cursorid ) ; <nl> / / caller must free ( ) returned QueryResult . <nl> QueryResult * runQuery ( Message & , const char * ns , int ntoskip , int ntoreturn , <nl> JSObj j , auto_ptr < set < string > > fieldFilter , <nl> - stringstream & ) ; <nl> + stringstream & , int queryOptions ) ; <nl> <nl> void updateObjects ( const char * ns , JSObj updateobj , JSObj pattern , bool upsert , stringstream & ss ) ; <nl> <nl> mmm a / grid / message . h <nl> ppp b / grid / message . h <nl> struct MsgData { <nl> int operation ; <nl> char _data [ 4 ] ; <nl> <nl> + int & dataAsInt ( ) { return * ( ( int * ) _data ) ; } <nl> + <nl> int dataLen ( ) ; / / len without header <nl> } ; <nl> const int MsgDataHeaderSize = sizeof ( MsgData ) - 4 ; <nl>
tailable cursors working
mongodb/mongo
edcee6aaa89618642f3be46f552abc3b6e8e938f
2008-08-13T16:17:18Z
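The tailable-cursor support in the diff above amounts to a three-state machine (Normal -> TailPoint -> TailResumed) layered onto BasicCursor: run off the end of the collection, mark the last record seen with setAtTail(), then re-probe from that point with tailResume() on the next getMore. A minimal sketch of the same machine over a hypothetical in-memory record list follows; Node and TailCursor are illustrative names, not types from the MongoDB source.

// Minimal sketch of the tailable-cursor state machine, assuming a
// hypothetical singly linked list of records in place of DiskLoc.
#include <cassert>

struct Node { int value; Node* next; };

class TailCursor {
    enum State { Normal, TailPoint, TailResumed };
    State state = Normal;
    Node* curr;            // current record; null once we run off the end
    Node* last = nullptr;  // last record seen; the resume point for tailing
public:
    explicit TailCursor(Node* head) : curr(head) {}
    bool ok() const { return curr != nullptr; }
    bool advance() {                 // mirrors BasicCursor::advance()
        if (!ok()) return false;
        last = curr;
        curr = curr->next;
        return ok();
    }
    void setAtTail() {               // mirrors BasicCursor::setAtTail()
        assert(state != TailPoint && curr == nullptr && last != nullptr);
        curr = last;
        last = nullptr;
        state = TailPoint;
    }
    void tailResume() {              // mirrors BasicCursor::tailResume()
        if (state == TailPoint) { state = TailResumed; advance(); }
    }
    bool tailing() const { return state != Normal; }  // sticky once active
};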
mmm a / utils / buildbot - release - notes . txt <nl> ppp b / utils / buildbot - release - notes . txt <nl> <nl> 2014 - 01 - 08 <nl> + mmmmmmmmm - <nl> <nl> * The " static " keyword changed to " type " . One can now define " type <nl> functions " and " type variables " which are functions and variables <nl>
Add missing underline .
apple/swift
6881e29ac90a6cc26d3b89481afbdb41b629cb14
2014-01-08T05:33:45Z
mmm a / hphp / runtime / base / string_data . cpp <nl> ppp b / hphp / runtime / base / string_data . cpp <nl> Array StringData : : GetConstants ( ) { <nl> return a ; <nl> } <nl> <nl> - void StringData : : initLiteral ( const char * data ) { <nl> - return initLiteral ( data , strlen ( data ) ) ; <nl> - } <nl> - <nl> - void StringData : : initLiteral ( const char * data , int len ) { <nl> - if ( uint32_t ( len ) > MaxSize ) { <nl> - throw InvalidArgumentException ( " len > 2 ^ 31 - 2 " , len ) ; <nl> - } <nl> - / / Do not copy literals , this StringData can have a shorter lifetime than <nl> - / / the literal , and the client can count on this - > data ( ) giving back <nl> - / / the literal ptr with the longer lifetime . Sketchy ! <nl> - m_hash = 0 ; <nl> - m_count = 0 ; <nl> - m_len = len ; <nl> - m_cdata = data ; <nl> - m_big . cap = len | IsLiteral ; <nl> - assert ( checkSane ( ) ) ; <nl> - } <nl> - <nl> void StringData : : enlist ( ) { <nl> assert ( isShared ( ) ) ; <nl> SweepNode & head = MemoryManager : : TheMemoryManager ( ) - > m_strings ; <nl> void StringData : : append ( const char * s , int len ) { <nl> / / TODO : t1122987 : in any of the cases below where we need a bigger buffer , <nl> / / we can probably assume we ' re in a concat - loop and pick a good buffer <nl> / / size to avoid O ( N ^ 2 ) copying cost . <nl> - if ( isShared ( ) | | isLiteral ( ) ) { <nl> + if ( isShared ( ) ) { <nl> / / buffer is immutable , don ' t modify it . <nl> StringSlice r = slice ( ) ; <nl> char * newdata = smart_concat ( r . ptr , r . len , s , len ) ; <nl> StringData * StringData : : copy ( bool sharedMemory / * = false * / ) const { <nl> / / which will be freed at the end of the request , and so must be <nl> / / copied . <nl> return new StringData ( data ( ) , size ( ) , CopyMalloc ) ; <nl> - } else { <nl> - if ( isLiteral ( ) ) { <nl> - return NEW ( StringData ) ( data ( ) , size ( ) , AttachLiteral ) ; <nl> - } <nl> - return NEW ( StringData ) ( data ( ) , size ( ) , CopyString ) ; <nl> } <nl> + return NEW ( StringData ) ( data ( ) , size ( ) , CopyString ) ; <nl> } <nl> <nl> MutableSlice StringData : : escalate ( uint32_t cap ) { <nl> StringData * StringData : : Escalate ( StringData * in ) { <nl> void StringData : : dump ( ) const { <nl> StringSlice s = slice ( ) ; <nl> <nl> - printf ( " StringData ( % d ) ( % s % s % s % d ) : [ " , m_count , <nl> - isLiteral ( ) ? " literal " : " " , <nl> + printf ( " StringData ( % d ) ( % s % s % d ) : [ " , m_count , <nl> isShared ( ) ? " shared " : " " , <nl> isStatic ( ) ? " static " : " " , <nl> s . len ) ; <nl> mmm a / hphp / runtime / base / string_data . h <nl> ppp b / hphp / runtime / base / string_data . h <nl> struct Slice { <nl> typedef Slice < const char > StringSlice ; <nl> typedef Slice < char > MutableSlice ; <nl> <nl> - / / const char * points to a string which must remain valid for the lifetime <nl> - / / of the StringData . It is fragile to rely on StringData . data ( ) returning <nl> - / / the same pointer after construction - - this invariant will probably be <nl> - / / deprecated to enable copying of small strings . <nl> - enum AttachLiteralMode { AttachLiteral } ; <nl> - <nl> / / Aggressively copy small strings and free the passed - in buffer immediately ; <nl> / / otherwise keep the buffer for long strings , and free it when the string <nl> / / is mutated or released . 
<nl> enum AttachStringMode { AttachString } ; <nl> / / const char * points to client - owned memory , StringData will copy it <nl> / / at construct - time using smart_malloc . This is only ok when the StringData <nl> / / itself was smart - allocated . <nl> - enum CopyStringMode { CopyString } ; <nl> + enum CopyStringMode { CopyString , AttachLiteral } ; <nl> <nl> / / reserve space for buffer that will be filled in by client . <nl> enum ReserveStringMode { ReserveString } ; <nl> enum CopyMallocMode { CopyMalloc } ; <nl> * big : m_data : 8 , m_len : 4 , m_count : 4 , m_hash : 4 , <nl> * junk [ 12 ] , node : 16 , shared : 8 , cap : 8 <nl> * <nl> - * If the format is IsLiteral or IsShared , we always use the " big " layout . <nl> + * If the format is IsShared , we always use the " big " layout . <nl> * resemblances to fbstring are not accidental . <nl> * / <nl> class StringData { <nl> class StringData { <nl> <nl> enum Format { <nl> IsSmall = 0 , / / short str overlaps m_big <nl> - IsLiteral = 0x1000000000000000 , / / literal string <nl> - IsShared = 0x2000000000000000 , / / shared memory string <nl> - IsMalloc = 0x3000000000000000 , / / m_big . data is malloc ' d <nl> - IsSmart = 0x4000000000000000 , / / m_big . data is smart_malloc ' d <nl> + IsShared = 0x1000000000000000 , / / shared memory string <nl> + IsMalloc = 0x2000000000000000 , / / m_big . data is malloc ' d <nl> + IsSmart = 0x3000000000000000 , / / m_big . data is smart_malloc ' d <nl> IsMask = 0xF000000000000000 <nl> } ; <nl> <nl> class StringData { <nl> * is actually only for SmartAllocator to pre - allocate the objects . <nl> * / <nl> explicit StringData ( const char * data ) { <nl> - initLiteral ( data ) ; <nl> - } <nl> - StringData ( const char * data , AttachLiteralMode ) { <nl> - initLiteral ( data ) ; <nl> + initCopy ( data ) ; <nl> } <nl> StringData ( const char * data , AttachStringMode ) { <nl> initAttach ( data ) ; <nl> } <nl> class StringData { <nl> initCopy ( data ) ; <nl> } <nl> <nl> - StringData ( const char * data , int len , AttachLiteralMode ) { <nl> - initLiteral ( data , len ) ; <nl> - } <nl> StringData ( const char * data , int len , AttachStringMode ) { <nl> initAttach ( data , len ) ; <nl> } <nl> class StringData { <nl> return StringSlice ( m_data , m_len ) ; <nl> } <nl> bool empty ( ) const { return size ( ) = = 0 ; } <nl> - bool isLiteral ( ) const { return format ( ) = = IsLiteral ; } <nl> bool isShared ( ) const { return format ( ) = = IsShared ; } <nl> bool isSmall ( ) const { return format ( ) = = IsSmall ; } <nl> - bool isImmutable ( ) const { <nl> - Format f = format ( ) ; <nl> - return f = = IsLiteral | | f = = IsShared | | isStatic ( ) ; <nl> - } <nl> + bool isImmutable ( ) const { return isStatic ( ) | | isShared ( ) ; } <nl> DataType isNumericWithVal ( int64_t & lval , double & dval , int allow_errors ) const ; <nl> bool isNumeric ( ) const ; <nl> bool isInteger ( ) const ; <nl> class StringData { <nl> / * * <nl> * Helpers . <nl> * / <nl> - void initLiteral ( const char * data ) ; <nl> void initAttach ( const char * data ) ; <nl> void initCopy ( const char * data ) ; <nl> - void initLiteral ( const char * data , int len ) ; <nl> void initAttach ( const char * data , int len ) ; <nl> void initCopy ( const char * data , int len ) ; <nl> void initMalloc ( const char * data , int len ) ; <nl> mmm a / hphp / runtime / base / type_string . h <nl> ppp b / hphp / runtime / base / type_string . h <nl> class String : protected SmartPtr < StringData > { <nl> m_px = NEW ( StringData ) ( s . data ( ) , s .
size ( ) , CopyString ) ; <nl> m_px - > setRefCount ( 1 ) ; <nl> } <nl> - / / attach to null terminated string literal <nl> - String ( const char * s , AttachLiteralMode mode ) { <nl> - if ( s ) { <nl> - m_px = NEW ( StringData ) ( s , mode ) ; <nl> - m_px - > setRefCount ( 1 ) ; <nl> - } <nl> - } <nl> / / attach to null terminated malloc ' ed string , maybe free it now . <nl> String ( const char * s , AttachStringMode mode ) { <nl> if ( s ) { <nl> class String : protected SmartPtr < StringData > { <nl> m_px - > setRefCount ( 1 ) ; <nl> } <nl> } <nl> - / / attach to binary string literal <nl> - String ( const char * s , int length , AttachLiteralMode mode ) { <nl> - if ( s ) { <nl> - m_px = NEW ( StringData ) ( s , length , mode ) ; <nl> - m_px - > setRefCount ( 1 ) ; <nl> - } <nl> - } <nl> / / attach to binary malloc ' ed string <nl> String ( const char * s , int length , AttachStringMode mode ) { <nl> if ( s ) { <nl> class String : protected SmartPtr < StringData > { <nl> bool isValidVariableName ( ) const { <nl> return m_px ? m_px - > isValidVariableName ( ) : false ; <nl> } <nl> - bool isLiteral ( ) const { <nl> - return m_px ? m_px - > isLiteral ( ) : true ; <nl> - } <nl> <nl> / * * <nl> * Take a sub - string from start with specified length . Note , read <nl>
Eliminate AttachLiteralMode
facebook/hhvm
2e293e471e0870fa1cd755cc1164e38f2153f53b
2013-07-24T17:35:44Z
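The commit above removes the "attach a literal" mode, so a StringData constructor now either adopts a malloc'd buffer (AttachString) or copies the caller's bytes (CopyString, which absorbs the old AttachLiteral call sites). A sketch of that two-mode ownership split, using invented names rather than the real HPHP classes:

// Illustrative two-mode string buffer: adopt vs. copy. Not the real
// StringData; just the ownership semantics the diff standardizes on.
#include <cstdlib>
#include <cstring>

enum AttachStringMode { AttachString };  // take over a malloc'd buffer
enum CopyStringMode   { CopyString };    // duplicate the caller's bytes

class Buf {
    char*  data_;
    size_t len_;
public:
    Buf(char* s, size_t len, AttachStringMode) : data_(s), len_(len) {}
    Buf(const char* s, size_t len, CopyStringMode)
        : data_(static_cast<char*>(std::malloc(len + 1))), len_(len) {
        std::memcpy(data_, s, len);
        data_[len] = '\0';
    }
    Buf(const Buf&) = delete;             // single owner; no double free
    Buf& operator=(const Buf&) = delete;
    ~Buf() { std::free(data_); }
    const char* data() const { return data_; }
    size_t size() const { return len_; }
};

Always copying at construction, as in Buf("abc", 3, CopyString), is what removes the lifetime coupling between the object and the source pointer, the fragility the deleted initLiteral comment called "Sketchy".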
new file mode 100644 <nl> index 0000000000 . . 793eb0a664 <nl> mmm / dev / null <nl> ppp b / code / bit - manipulation / bitDivision . js <nl> <nl> + / * Part of Cosmos by OpenGenus Foundation * / <nl> + <nl> + function divide ( dividend , divisor ) { <nl> + <nl> + if ( divisor = = = 0 ) { <nl> + return " undefined " ; <nl> + } <nl> + <nl> + const isPositive = ( dividend > 0 ) = = = ( divisor > 0 ) ; <nl> + dividend = Math . abs ( dividend ) ; <nl> + divisor = Math . abs ( divisor ) ; <nl> + <nl> + let answer = 0 ; <nl> + while ( dividend > = divisor ) { <nl> + let temp = divisor ; <nl> + let i = 1 ; <nl> + while ( dividend > = temp ) { <nl> + dividend - = temp ; <nl> + answer + = i ; <nl> + i < < = 1 ; <nl> + temp < < = 1 ; <nl> + } <nl> + } <nl> + <nl> + if ( ! isPositive ) { <nl> + answer = - answer ; <nl> + } <nl> + <nl> + return answer ; <nl> + } <nl> + <nl> + <nl> + function test ( ) { <nl> + <nl> + const testCases = [ <nl> + [ 9 , 4 ] , <nl> + [ - 10 , 3 ] , <nl> + [ 103 , - 10 ] , <nl> + [ - 9 , - 4 ] , <nl> + [ 0 , - 3 ] , <nl> + [ 2 , 0 ] <nl> + ] ; <nl> + <nl> + testCases . forEach ( test = > <nl> + console . log ( ` $ { test [ 0 ] } / $ { test [ 1 ] } = $ { divide ( test [ 0 ] , test [ 1 ] ) } ` ) ) ; <nl> + } <nl> + <nl> + <nl> + test ( ) ; <nl>
Create bit division for javascript
OpenGenus/cosmos
b094bfd678211e0b69ce79008a796c9aa280fd76
2017-10-02T15:43:26Z
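A C++ rendering of the shift-and-subtract division above may make the doubling loop easier to follow. It keeps the structure of the JavaScript and assumes inputs whose magnitudes fit in int64_t after negation:

// Shift-and-subtract integer division, a direct port of the JS above.
#include <cstdint>
#include <cstdlib>
#include <iostream>

int64_t divide(int64_t dividend, int64_t divisor) {
    if (divisor == 0) return 0;  // the JS version returns "undefined" here
    const bool samesign = (dividend < 0) == (divisor < 0);
    uint64_t a = static_cast<uint64_t>(std::llabs(dividend));
    uint64_t b = static_cast<uint64_t>(std::llabs(divisor));
    int64_t answer = 0;
    while (a >= b) {
        uint64_t temp = b;   // the divisor scaled by powers of two
        int64_t i = 1;       // the matching multiple of the divisor
        while (a >= temp) {
            a -= temp;       // subtract the largest fitting chunk
            answer += i;
            i <<= 1;
            temp <<= 1;
        }
    }
    return samesign ? answer : -answer;
}

int main() {
    std::cout << divide(9, 4)     << '\n'   // 2
              << divide(-10, 3)   << '\n'   // -3
              << divide(103, -10) << '\n'   // -10
              << divide(-9, -4)   << '\n';  // 2
}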
mmm a / dbms / tests / queries / 0_stateless / 01034_prewhere_max_parallel_replicas . sql <nl> ppp b / dbms / tests / queries / 0_stateless / 01034_prewhere_max_parallel_replicas . sql <nl> <nl> drop table if exists test_max_parallel_replicas_lr ; <nl> <nl> + - - If you wonder why the table is named with the " _lr " suffix in this test : <nl> + - - no reason . Actually it is the name of the table in Yandex . Market and they provided this test case for us . <nl> + <nl> CREATE TABLE test_max_parallel_replicas_lr ( timestamp UInt64 ) ENGINE = MergeTree ORDER BY ( intHash32 ( timestamp ) ) SAMPLE BY intHash32 ( timestamp ) ; <nl> INSERT INTO test_max_parallel_replicas_lr select number as timestamp from system . numbers limit 100 ; <nl> <nl> SET max_parallel_replicas = 2 ; <nl> select count ( ) FROM remote ( ' 127 . 0 . 0 . { 2 | 3 } ' , currentDatabase ( ) , test_max_parallel_replicas_lr ) PREWHERE timestamp > 0 ; <nl> + <nl> + drop table test_max_parallel_replicas_lr ; <nl>
Update 01034_prewhere_max_parallel_replicas . sql
ClickHouse/ClickHouse
23f848993d392586c70f2d940f204d6f578477cb
2019-11-26T06:27:25Z
mmm a / src / key_io . cpp <nl> ppp b / src / key_io . cpp <nl> class DestinationEncoder : public boost : : static_visitor < std : : string > <nl> std : : string operator ( ) ( const WitnessV0KeyHash & id ) const <nl> { <nl> std : : vector < unsigned char > data = { 0 } ; <nl> - ConvertBits < 8 , 5 , true > ( data , id . begin ( ) , id . end ( ) ) ; <nl> + ConvertBits < 8 , 5 , true > ( [ & ] ( unsigned char c ) { data . push_back ( c ) ; } , id . begin ( ) , id . end ( ) ) ; <nl> return bech32 : : Encode ( m_params . Bech32HRP ( ) , data ) ; <nl> } <nl> <nl> std : : string operator ( ) ( const WitnessV0ScriptHash & id ) const <nl> { <nl> std : : vector < unsigned char > data = { 0 } ; <nl> - ConvertBits < 8 , 5 , true > ( data , id . begin ( ) , id . end ( ) ) ; <nl> + ConvertBits < 8 , 5 , true > ( [ & ] ( unsigned char c ) { data . push_back ( c ) ; } , id . begin ( ) , id . end ( ) ) ; <nl> return bech32 : : Encode ( m_params . Bech32HRP ( ) , data ) ; <nl> } <nl> <nl> class DestinationEncoder : public boost : : static_visitor < std : : string > <nl> return { } ; <nl> } <nl> std : : vector < unsigned char > data = { ( unsigned char ) id . version } ; <nl> - ConvertBits < 8 , 5 , true > ( data , id . program , id . program + id . length ) ; <nl> + ConvertBits < 8 , 5 , true > ( [ & ] ( unsigned char c ) { data . push_back ( c ) ; } , id . program , id . program + id . length ) ; <nl> return bech32 : : Encode ( m_params . Bech32HRP ( ) , data ) ; <nl> } <nl> <nl> CTxDestination DecodeDestination ( const std : : string & str , const CChainParams & par <nl> / / Bech32 decoding <nl> int version = bech . second [ 0 ] ; / / The first 5 bit symbol is the witness version ( 0 - 16 ) <nl> / / The rest of the symbols are converted witness program bytes . <nl> - if ( ConvertBits < 5 , 8 , false > ( data , bech . second . begin ( ) + 1 , bech . second . end ( ) ) ) { <nl> + if ( ConvertBits < 5 , 8 , false > ( [ & ] ( unsigned char c ) { data . push_back ( c ) ; } , bech . second . begin ( ) + 1 , bech . second . end ( ) ) ) { <nl> if ( version = = 0 ) { <nl> { <nl> WitnessV0KeyHash keyid ; <nl> mmm a / src / utilstrencodings . h <nl> ppp b / src / utilstrencodings . h <nl> bool ParseFixedPoint ( const std : : string & val , int decimals , int64_t * amount_out ) ; <nl> <nl> / * * Convert from one power - of - 2 number base to another . * / <nl> template < int frombits , int tobits , bool pad , typename O , typename I > <nl> - bool ConvertBits ( O & out , I it , I end ) { <nl> + bool ConvertBits ( const O & outfn , I it , I end ) { <nl> size_t acc = 0 ; <nl> size_t bits = 0 ; <nl> constexpr size_t maxv = ( 1 < < tobits ) - 1 ; <nl> bool ConvertBits ( O & out , I it , I end ) { <nl> bits + = frombits ; <nl> while ( bits > = tobits ) { <nl> bits - = tobits ; <nl> - out . push_back ( ( acc > > bits ) & maxv ) ; <nl> + outfn ( ( acc > > bits ) & maxv ) ; <nl> } <nl> + + it ; <nl> } <nl> if ( pad ) { <nl> - if ( bits ) out . push_back ( ( acc < < ( tobits - bits ) ) & maxv ) ; <nl> + if ( bits ) outfn ( ( acc < < ( tobits - bits ) ) & maxv ) ; <nl> } else if ( bits > = frombits | | ( ( acc < < ( tobits - bits ) ) & maxv ) ) { <nl> return false ; <nl> } <nl>
Generalize ConvertBits
bitcoin/bitcoin
3296a3bb7fc0a6c47b60c79e968dbf8175d6b716
2018-03-07T04:28:08Z
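The change above turns ConvertBits from "append to a container" into "call an output functor", so any sink works and the key_io.cpp call sites pass a push_back lambda. A usage sketch of the new signature follows; the template body restates the diff, with the accumulator line that the hunk does not show filled in as a plain shift-or (an assumption about the unchanged context, not quoted from it):

// The generalized converter from the diff, plus a small usage harness.
#include <cstddef>
#include <cstdio>
#include <vector>

template<int frombits, int tobits, bool pad, typename O, typename I>
bool ConvertBits(const O& outfn, I it, I end) {
    size_t acc = 0;
    size_t bits = 0;
    constexpr size_t maxv = (1 << tobits) - 1;
    while (it != end) {
        acc = (acc << frombits) | *it;  // accumulate input bits (assumed
                                        // unchanged line, elided in the hunk)
        bits += frombits;
        while (bits >= tobits) {
            bits -= tobits;
            outfn((acc >> bits) & maxv);  // emit one tobits-wide group
        }
        ++it;
    }
    if (pad) {
        if (bits) outfn((acc << (tobits - bits)) & maxv);
    } else if (bits >= frombits || ((acc << (tobits - bits)) & maxv)) {
        return false;
    }
    return true;
}

int main() {
    const unsigned char program[4] = {0x75, 0x1e, 0x76, 0xe8};
    std::vector<unsigned char> data;
    // The new calling convention: hand in a sink lambda, not a container.
    ConvertBits<8, 5, true>([&](unsigned char c) { data.push_back(c); },
                            program, program + 4);
    for (unsigned char c : data) std::printf("%d ", c);  // 5-bit groups
    std::printf("\n");
}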
mmm a / src / ProtocolBuffers . Serialization / Extensions . cs <nl> ppp b / src / ProtocolBuffers . Serialization / Extensions . cs <nl> <nl> using System . IO ; <nl> using System . Xml ; <nl> using Google . ProtocolBuffers . Serialization ; <nl> + using Google . ProtocolBuffers . Serialization . Http ; <nl> <nl> namespace Google . ProtocolBuffers <nl> { <nl> public static string ToXml ( this IMessageLite message , string rootElementName ) <nl> return w . ToString ( ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Writes the message instance to the stream using the content type provided <nl> + / / / < / summary > <nl> + / / / < param name = " message " > An instance of a message < / param > <nl> + / / / < param name = " options " > Options specific to writing this message and / or content type < / param > <nl> + / / / < param name = " contentType " > The mime type of the content to be written < / param > <nl> + / / / < param name = " output " > The stream to write the message to < / param > <nl> + public static void WriteTo ( this IMessageLite message , MessageFormatOptions options , string contentType , Stream output ) <nl> + { <nl> + ICodedOutputStream codedOutput = MessageFormatFactory . CreateOutputStream ( options , contentType , output ) ; <nl> + <nl> + / / Output the appropriate message preamble <nl> + codedOutput . WriteMessageStart ( ) ; <nl> + <nl> + / / Write the message content to the output <nl> + message . WriteTo ( codedOutput ) ; <nl> + <nl> + / / Write the closing message fragment <nl> + codedOutput . WriteMessageEnd ( ) ; <nl> + codedOutput . Flush ( ) ; <nl> + } <nl> + <nl> # endregion <nl> # region IBuilderLite Extensions <nl> / / / < summary > <nl> public static string ToXml ( this IMessageLite message , string rootElementName ) <nl> . Merge ( rootElementName , builder , extensionRegistry ) ; <nl> } <nl> <nl> + / / / < summary > <nl> + / / / Merges the message from the input stream based on the contentType provided <nl> + / / / < / summary > <nl> + / / / < typeparam name = " TBuilder " > A type derived from IBuilderLite < / typeparam > <nl> + / / / < param name = " builder " > An instance of a message builder < / param > <nl> + / / / < param name = " options " > Options specific to reading this message and / or content type < / param > <nl> + / / / < param name = " contentType " > The mime type of the input stream content < / param > <nl> + / / / < param name = " input " > The stream to read the message from < / param > <nl> + / / / < returns > The same builder instance that was supplied in the builder parameter < / returns > <nl> + public static TBuilder MergeFrom < TBuilder > ( this TBuilder builder , MessageFormatOptions options , string contentType , Stream input ) where TBuilder : IBuilderLite <nl> + { <nl> + ICodedInputStream codedInput = MessageFormatFactory . CreateInputStream ( options , contentType , input ) ; <nl> + codedInput . ReadMessageStart ( ) ; <nl> + builder . WeakMergeFrom ( codedInput , options . ExtensionRegistry ) ; <nl> + codedInput . ReadMessageEnd ( ) ; <nl> + return builder ; <nl> + } <nl> + <nl> + # endregion <nl> + # region IRpcServerStub Extensions <nl> + <nl> + / / / < summary > <nl> + / / / Used to implement a service endpoint on an HTTP server . This works with services generated with the <nl> + / / / service_generator_type option set to IRPCDISPATCH . 
<nl> + / / / < / summary > <nl> + / / / < param name = " stub " > The service execution stub < / param > <nl> + / / / < param name = " methodName " > The name of the method being invoked < / param > <nl> + / / / < param name = " options " > optional arguments for the format reader / writer < / param > <nl> + / / / < param name = " contentType " > The mime type for the input stream < / param > <nl> + / / / < param name = " input " > The input stream < / param > <nl> + / / / < param name = " responseType " > The mime type for the output stream < / param > <nl> + / / / < param name = " output " > The output stream < / param > <nl> + public static void HttpCallMethod ( this IRpcServerStub stub , string methodName , MessageFormatOptions options , <nl> + string contentType , Stream input , string responseType , Stream output ) <nl> + { <nl> + ICodedInputStream codedInput = MessageFormatFactory . CreateInputStream ( options , contentType , input ) ; <nl> + codedInput . ReadMessageStart ( ) ; <nl> + IMessageLite response = stub . CallMethod ( methodName , codedInput , options . ExtensionRegistry ) ; <nl> + codedInput . ReadMessageEnd ( ) ; <nl> + response . WriteTo ( options , responseType , output ) ; <nl> + } <nl> + <nl> # endregion <nl> } <nl> } <nl> mmm a / src / ProtocolBuffers . Serialization / Http / MessageFormatFactory . cs <nl> ppp b / src / ProtocolBuffers . Serialization / Http / MessageFormatFactory . cs <nl> public static ICodedInputStream CreateInputStream ( MessageFormatOptions options , <nl> <nl> return codedInput ; <nl> } <nl> - <nl> - / / / < summary > <nl> - / / / Merges the message from the input stream based on the contentType provided <nl> - / / / < / summary > <nl> - / / / < typeparam name = " TBuilder " > A type derived from IBuilderLite < / typeparam > <nl> - / / / < param name = " builder " > An instance of a message builder < / param > <nl> - / / / < param name = " options " > Options specific to reading this message and / or content type < / param > <nl> - / / / < param name = " contentType " > The mime type of the input stream content < / param > <nl> - / / / < param name = " input " > The stream to read the message from < / param > <nl> - / / / < returns > The same builder instance that was supplied in the builder parameter < / returns > <nl> - public static TBuilder MergeFrom < TBuilder > ( this TBuilder builder , MessageFormatOptions options , string contentType , Stream input ) where TBuilder : IBuilderLite <nl> - { <nl> - ICodedInputStream codedInput = CreateInputStream ( options , contentType , input ) ; <nl> - codedInput . ReadMessageStart ( ) ; <nl> - builder . WeakMergeFrom ( codedInput , options . ExtensionRegistry ) ; <nl> - codedInput . 
ReadMessageEnd ( ) ; <nl> - return builder ; <nl> - } <nl> <nl> / / / < summary > <nl> / / / Writes the message instance to the stream using the content type provided <nl> public static ICodedOutputStream CreateOutputStream ( MessageFormatOptions options <nl> return codedOutput ; <nl> } <nl> <nl> - / / / < summary > <nl> - / / / Writes the message instance to the stream using the content type provided <nl> - / / / < / summary > <nl> - / / / < param name = " message " > An instance of a message < / param > <nl> - / / / < param name = " options " > Options specific to writing this message and / or content type < / param > <nl> - / / / < param name = " contentType " > The mime type of the content to be written < / param > <nl> - / / / < param name = " output " > The stream to write the message to < / param > <nl> - public static void WriteTo ( this IMessageLite message , MessageFormatOptions options , string contentType , Stream output ) <nl> - { <nl> - ICodedOutputStream codedOutput = CreateOutputStream ( options , contentType , output ) ; <nl> - <nl> - / / Output the appropriate message preamble <nl> - codedOutput . WriteMessageStart ( ) ; <nl> - <nl> - / / Write the message content to the output <nl> - message . WriteTo ( codedOutput ) ; <nl> - <nl> - / / Write the closing message fragment <nl> - codedOutput . WriteMessageEnd ( ) ; <nl> - codedOutput . Flush ( ) ; <nl> - } <nl> - <nl> private static ICodedInputStream ContentTypeToInputStream ( string contentType , MessageFormatOptions options , Stream input ) <nl> { <nl> contentType = ( contentType ? ? String . Empty ) . Split ( ' ; ' ) [ 0 ] . Trim ( ) ; <nl> deleted file mode 100644 <nl> index 6177d9dbdf . . 0000000000 <nl> mmm a / src / ProtocolBuffers . Serialization / Http / ServiceExtensions . cs <nl> ppp / dev / null <nl> <nl> -  using System . Collections . Generic ; <nl> - using System . Text ; <nl> - using Google . ProtocolBuffers ; <nl> - using System . IO ; <nl> - <nl> - namespace Google . ProtocolBuffers . Serialization . Http <nl> - { <nl> - / / / < summary > <nl> - / / / Extensions for the IRpcServerStub <nl> - / / / < / summary > <nl> - public static class ServiceExtensions <nl> - { <nl> - / / / < summary > <nl> - / / / Used to implement a service endpoint on an HTTP server . This works with services generated with the <nl> - / / / service_generator_type option set to IRPCDISPATCH . <nl> - / / / < / summary > <nl> - / / / < param name = " stub " > The service execution stub < / param > <nl> - / / / < param name = " methodName " > The name of the method being invoked < / param > <nl> - / / / < param name = " options " > optional arguments for the format reader / writer < / param > <nl> - / / / < param name = " contentType " > The mime type for the input stream < / param > <nl> - / / / < param name = " input " > The input stream < / param > <nl> - / / / < param name = " responseType " > The mime type for the output stream < / param > <nl> - / / / < param name = " output " > The output stream < / param > <nl> - public static void HttpCallMethod ( this IRpcServerStub stub , string methodName , MessageFormatOptions options , <nl> - string contentType , Stream input , string responseType , Stream output ) <nl> - { <nl> - ICodedInputStream codedInput = MessageFormatFactory . CreateInputStream ( options , contentType , input ) ; <nl> - codedInput . ReadMessageStart ( ) ; <nl> - IMessageLite response = stub . CallMethod ( methodName , codedInput , options . ExtensionRegistry ) ; <nl> - codedInput . 
ReadMessageEnd ( ) ; <nl> - response . WriteTo ( options , responseType , output ) ; <nl> - } <nl> - } <nl> - } <nl> mmm a / src / ProtocolBuffers . Serialization / ProtocolBuffers . Serialization . csproj <nl> ppp b / src / ProtocolBuffers . Serialization / ProtocolBuffers . Serialization . csproj <nl> <nl> < Compile Include = " Http \ FormUrlEncodedReader . cs " / > <nl> < Compile Include = " Http \ MessageFormatFactory . cs " / > <nl> < Compile Include = " Http \ MessageFormatOptions . cs " / > <nl> - < Compile Include = " Http \ ServiceExtensions . cs " / > <nl> < Compile Include = " Properties \ AssemblyInfo . cs " / > <nl> < Compile Include = " AbstractReader . cs " / > <nl> < Compile Include = " AbstractTextReader . cs " / > <nl> mmm a / src / ProtocolBuffers . Serialization / ProtocolBuffersLite . Serialization . csproj <nl> ppp b / src / ProtocolBuffers . Serialization / ProtocolBuffersLite . Serialization . csproj <nl> <nl> < Compile Include = " Http \ FormUrlEncodedReader . cs " / > <nl> < Compile Include = " Http \ MessageFormatFactory . cs " / > <nl> < Compile Include = " Http \ MessageFormatOptions . cs " / > <nl> - < Compile Include = " Http \ ServiceExtensions . cs " / > <nl> < Compile Include = " Properties \ AssemblyInfo . cs " / > <nl> < Compile Include = " AbstractReader . cs " / > <nl> < Compile Include = " AbstractTextReader . cs " / > <nl>
Moved all extension methods to a single class / file
protocolbuffers/protobuf
4ad552692f737dc27e0ea7829eb8f771b852013c
2011-10-01T19:45:47Z
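All of the consolidated extension methods above share one flow: pick a codec from the MIME type, frame the payload between a message-start and message-end pair, and flush. A schematic C++ restatement of that flow; the interfaces, the RawOutput stub, and CreateOutput are invented for illustration, since the real code is C# and dispatches through MessageFormatFactory:

// Schematic content-type-dispatched framing, modeled on the WriteTo
// extension method above. All names here are hypothetical.
#include <iostream>
#include <memory>
#include <ostream>
#include <string>

struct CodedOutput {
    virtual void WriteMessageStart() = 0;               // format preamble
    virtual void WriteBody(const std::string& p) = 0;   // message content
    virtual void WriteMessageEnd() = 0;                 // closing fragment
    virtual void Flush() = 0;
    virtual ~CodedOutput() {}
};

struct RawOutput : CodedOutput {
    std::ostream& out;
    explicit RawOutput(std::ostream& o) : out(o) {}
    void WriteMessageStart() override { out << "{"; }
    void WriteBody(const std::string& p) override { out << p; }
    void WriteMessageEnd() override { out << "}"; }
    void Flush() override { out.flush(); }
};

// A real factory would branch on the MIME type (JSON, XML, binary, ...);
// this stub always returns the one codec above.
std::unique_ptr<CodedOutput> CreateOutput(const std::string& /*contentType*/,
                                          std::ostream& out) {
    return std::unique_ptr<CodedOutput>(new RawOutput(out));
}

void WriteTo(const std::string& payload, const std::string& contentType,
             std::ostream& out) {
    auto codec = CreateOutput(contentType, out);
    codec->WriteMessageStart();
    codec->WriteBody(payload);
    codec->WriteMessageEnd();
    codec->Flush();
}

int main() {
    WriteTo("\"hello\"", "application/json", std::cout);  // prints {"hello"}
}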
mmm a / SConstruct <nl> ppp b / SConstruct <nl> if nix : <nl> env . Append ( CPPFLAGS = " - fno - builtin - memcmp " ) # glibc ' s memcmp is faster than gcc ' s <nl> <nl> env . Append ( CPPDEFINES = " _FILE_OFFSET_BITS = 64 " ) <nl> - env . Append ( CXXFLAGS = " - Wnon - virtual - dtor " ) <nl> + env . Append ( CXXFLAGS = " - Wnon - virtual - dtor - Woverloaded - virtual " ) <nl> env . Append ( LINKFLAGS = " - fPIC - pthread - rdynamic " ) <nl> env . Append ( LIBS = [ ] ) <nl> <nl> mmm a / src / mongo / tools / bridge . cpp <nl> ppp b / src / mongo / tools / bridge . cpp <nl> set < MessagingPort * > ports ; <nl> class MyListener : public Listener { <nl> public : <nl> MyListener ( int port ) : Listener ( " bridge " , " " , port ) { } <nl> - virtual void accepted ( MessagingPort * mp ) { <nl> + virtual void acceptedMP ( MessagingPort * mp ) { <nl> ports . insert ( mp ) ; <nl> Forwarder f ( * mp ) ; <nl> boost : : thread t ( f ) ; <nl> mmm a / src / mongo / util / net / listen . cpp <nl> ppp b / src / mongo / util / net / listen . cpp <nl> namespace mongo { <nl> <nl> <nl> void Listener : : accepted ( boost : : shared_ptr < Socket > psocket ) { <nl> - accepted ( new MessagingPort ( psocket ) ) ; <nl> + acceptedMP ( new MessagingPort ( psocket ) ) ; <nl> } <nl> <nl> - void Listener : : accepted ( MessagingPort * mp ) { <nl> + void Listener : : acceptedMP ( MessagingPort * mp ) { <nl> assert ( ! " You must override one of the accepted methods " ) ; <nl> } <nl> <nl> mmm a / src / mongo / util / net / listen . h <nl> ppp b / src / mongo / util / net / listen . h <nl> namespace mongo { <nl> <nl> / * spawn a thread , etc . , then return * / <nl> virtual void accepted ( boost : : shared_ptr < Socket > psocket ) ; <nl> - virtual void accepted ( MessagingPort * mp ) ; <nl> + virtual void acceptedMP ( MessagingPort * mp ) ; <nl> <nl> const int _port ; <nl> <nl> mmm a / src / mongo / util / net / message_server_port . cpp <nl> ppp b / src / mongo / util / net / message_server_port . cpp <nl> namespace mongo { <nl> pms : : handler = handler ; <nl> } <nl> <nl> - virtual void accepted ( MessagingPort * p ) { <nl> + virtual void acceptedMP ( MessagingPort * p ) { <nl> <nl> if ( ! connTicketHolder . tryAcquire ( ) ) { <nl> log ( ) < < " connection refused because too many open connections : " < < connTicketHolder . used ( ) < < endl ; <nl>
Add - Woverloaded - virtual to CXXFLAGS and fix build SERVER - 4802
mongodb/mongo
74c1ffb05d5d5fc274897deaa57aae3846db0ee7
2012-02-09T22:57:49Z
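The renames above (accepted -> acceptedMP) exist because overriding only one of a set of same-named virtuals hides the rest in the derived class, which is exactly what the newly added -Woverloaded-virtual flag diagnoses. A minimal reproduction with simplified types (raw pointers here, not the real boost::shared_ptr signatures):

// Compile with: g++ -Woverloaded-virtual -c hiding.cpp
struct Socket {};
struct MessagingPort {};

struct Listener {
    virtual void accepted(Socket*) {}
    virtual void accepted(MessagingPort*) {}
    virtual ~Listener() {}
};

struct MyListener : Listener {
    // Overrides one overload but hides Listener::accepted(Socket*);
    // g++ reports that the Socket* overload was hidden.
    void accepted(MessagingPort*) override {}
};

void demo(MyListener& ml, Socket* s) {
    // ml.accepted(s);         // error: only accepted(MessagingPort*) visible
    ml.Listener::accepted(s);  // must qualify, or add a using-declaration
}

Giving the two entry points distinct names, as the commit does, sidesteps the hiding entirely instead of papering over it with using-declarations.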
mmm a / src / compiler / analysis / analysis_result . cpp <nl> ppp b / src / compiler / analysis / analysis_result . cpp <nl> void AnalysisResult : : outputCPPExtClassImpl ( CodeGenerator & cg ) { <nl> } <nl> } <nl> <nl> + ClassScope : : outputCPPHashTableClasses ( cg , merged , classes ) ; <nl> ClassScope : : outputCPPClassVarInitImpl ( cg , merged , classes ) ; <nl> ClassScope : : outputCPPDynamicClassCreateImpl ( cg , merged , classes ) ; <nl> ClassScope : : outputCPPGetCallInfoStaticMethodImpl ( cg , merged , classes ) ; <nl> void AnalysisResult : : outputCPPDynamicClassTables ( <nl> classes . push_back ( cls - > getOriginalName ( ) . c_str ( ) ) ; <nl> classScopes [ cls - > getName ( ) ] . push_back ( cls ) ; <nl> if ( ! cls - > isRedeclaring ( ) ) { <nl> - if ( ! system ) { <nl> - cls - > outputCPPDynamicClassDecl ( cg ) ; <nl> - } <nl> cls - > outputCPPGlobalTableWrappersDecl ( cg , ar ) ; <nl> } <nl> break ; <nl> void AnalysisResult : : outputCPPDynamicClassTables ( <nl> classScopes [ cls - > getName ( ) ] . push_back ( cls ) ; <nl> } <nl> } <nl> + ClassScope : : outputCPPHashTableClasses ( cg , classScopes , classes ) ; <nl> ClassScope : : outputCPPClassVarInitImpl ( cg , classScopes , classes ) ; <nl> ClassScope : : outputCPPDynamicClassCreateImpl ( cg , classScopes , classes ) ; <nl> ClassScope : : outputCPPGetCallInfoStaticMethodImpl ( cg , classScopes , classes ) ; <nl> mmm a / src / compiler / analysis / class_scope . cpp <nl> ppp b / src / compiler / analysis / class_scope . cpp <nl> void ClassScope : : outputCPPDynamicClassImpl ( CodeGenerator & cg , <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> - void ClassScope : : outputCPPHashTableClassVarInit <nl> + void ClassScope : : outputCPPHashTableClasses <nl> ( CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes , <nl> const vector < const char * > & classes ) { <nl> bool system = cg . getOutput ( ) = = CodeGenerator : : SystemCPP ; <nl> - ASSERT ( cg . getCurrentIndentation ( ) = = 0 ) ; <nl> - const char text1 [ ] = <nl> - " struct hashNodeCTD { \ n " <nl> - " int64 hash ; \ n " <nl> - " const char * name ; \ n " <nl> - " int64 ptv1 ; \ n " <nl> - " ObjectData * ( * const ptr2 ) ( ) ; \ n " <nl> - " } ; \ n " ; <nl> - <nl> - const char text2 [ ] = <nl> - " # define GET_CS_OFFSET ( n ) " <nl> - " ( ( offsetof ( GlobalVariables , % s # # n ) - " <nl> - " offsetof ( GlobalVariables , tgv_RedeclaredObjectStaticCallbacksConstPtr ) ) / " <nl> - " sizeof ( RedeclaredObjectStaticCallbacksConst * ) ) \ n " <nl> - " inline ALWAYS_INLINE " <nl> - " const ObjectStaticCallbacks * getCallbacks ( \ n " <nl> - " const hashNodeCTD * p , CStrRef s , GlobalVariables * g ) { \ n " <nl> - " int64 off = p - > ptv1 ; \ n " <nl> - " if ( LIKELY ( ! ( off & 1 ) ) ) return ( ( const ObjectStaticCallbacks * ) off ) ; \ n " <nl> - " checkClassExists ( s , g ) ; \ n " <nl> - " if ( LIKELY ( p - > ptr2 ! 
= 0 ) ) / * volatile class * / return " <nl> - " ( ( const ObjectStaticCallbacks * ) ( off - 1 ) ) ; \ n " <nl> - " / * redeclared class * / \ n " <nl> - " return & g - > tgv_RedeclaredObjectStaticCallbacksConstPtr [ off > > 1 ] - > oscb ; \ n " <nl> - " } \ n " ; <nl> - <nl> - const char text3 [ ] = <nl> - " \ n " <nl> - " static const hashNodeCTD * \ n " <nl> - " findCTD ( CStrRef name ) { \ n " <nl> - " int64 hash = name - > hash ( ) ; \ n " <nl> - " int o = ctdMapTable [ hash & % d ] ; \ n " <nl> - " if ( UNLIKELY ( o < 0 ) ) return NULL ; \ n " <nl> - " const hashNodeCTD * p = & ctdBuckets [ o ] ; \ n " <nl> - " int64 h = p - > hash & ( uint64 ( - 1 ) > > 1 ) ; \ n " <nl> - " do { \ n " <nl> - " if ( h = = hash & & " <nl> - " ( LIKELY ( p - > name = = name . data ( ) ) | | " <nl> - " LIKELY ( ! strcasecmp ( p - > name , name . data ( ) ) ) ) ) return p ; \ n " <nl> - " h = ( + + p ) - > hash ; \ n " <nl> - " } while ( h > = 0 ) ; \ n " <nl> - " return NULL ; \ n " <nl> - " } \ n " ; <nl> - <nl> - JumpTable jt ( cg , classes , true , true , true , true ) ; <nl> - cg_printf ( text1 ) ; <nl> - if ( ! system ) { <nl> - cg_printf ( text2 , Option : : ClassStaticsCallbackPrefix ) ; <nl> - } <nl> - cg_printf ( " static const hashNodeCTD ctdBuckets [ ] = { \ n " ) ; <nl> - <nl> - int64 min64 = - 1 - int64 ( uint64 ( - 1 ) > > 1 ) ; <nl> - vector < int > offsets ; <nl> - int prev = - 1 ; <nl> - for ( int n = 0 ; jt . ready ( ) ; + + n , jt . next ( ) ) { <nl> - int cur = jt . current ( ) ; <nl> - bool changed = false ; <nl> - if ( prev ! = cur ) { <nl> - changed = true ; <nl> - while ( + + prev ! = cur ) { <nl> - offsets . push_back ( - 1 ) ; <nl> + if ( classes . size ( ) ) { <nl> + ASSERT ( cg . getCurrentIndentation ( ) = = 0 ) ; <nl> + const char text1 [ ] = <nl> + " struct hashNodeCTD { \ n " <nl> + " int64 hash ; \ n " <nl> + " int32 flags ; \ n " <nl> + " int32 cdec ; \ n " <nl> + " const char * name ; \ n " <nl> + " int64 ptv1 ; \ n " <nl> + " } ; \ n " ; <nl> + <nl> + const char text2 [ ] = <nl> + " # define GET_CS_OFFSET ( n ) offsetof ( GlobalVariables , % s # # n ) \ n " <nl> + " inline ALWAYS_INLINE " <nl> + " const ObjectStaticCallbacks * getCallbacks ( \ n " <nl> + " const hashNodeCTD * p , CStrRef s ) { \ n " <nl> + " int64 off = p - > ptv1 ; \ n " <nl> + " if ( LIKELY ( ! ( off & 1 ) ) ) return ( ( const ObjectStaticCallbacks * ) off ) ; \ n " <nl> + " DECLARE_GLOBAL_VARIABLES ( g ) ; \ n " <nl> + " checkClassExistsThrow ( s , ( bool * ) ( ( char * ) g + p - > cdec ) ) ; \ n " <nl> + " if ( LIKELY ( ! ( off & 2 ) ) ) / * volatile class * / return " <nl> + " ( ( const ObjectStaticCallbacks * ) ( off - 1 ) ) ; \ n " <nl> + " / * redeclared class * / \ n " <nl> + " return * ( ObjectStaticCallbacks * * ) ( ( char * ) g + ( off - 3 ) ) ; \ n " <nl> + " } \ n " ; <nl> + <nl> + const char text3 [ ] = <nl> + " \ n " <nl> + " static const hashNodeCTD * \ n " <nl> + " findCTD ( CStrRef name ) { \ n " <nl> + " int64 hash = name - > hash ( ) ; \ n " <nl> + " int o = ctdMapTable [ hash & % d ] ; \ n " <nl> + " if ( UNLIKELY ( o < 0 ) ) return NULL ; \ n " <nl> + " const hashNodeCTD * p = & ctdBuckets [ o ] ; \ n " <nl> + " do { \ n " <nl> + " int64 h = p - > hash ; \ n " <nl> + " if ( h = = hash & & " <nl> + " ( LIKELY ( p - > name = = name . data ( ) ) | | " <nl> + " LIKELY ( ! strcasecmp ( p - > name , name . data ( ) ) ) ) ) return p ; \ n " <nl> + " } while ( ! 
( p + + - > flags & 1 ) ) ; \ n " <nl> + " return NULL ; \ n " <nl> + " } \ n " ; <nl> + <nl> + JumpTable jt ( cg , classes , true , true , true , true ) ; <nl> + cg_printf ( text1 ) ; <nl> + if ( ! system ) { <nl> + cg_printf ( text2 , Option : : ClassStaticsCallbackPrefix ) ; <nl> + } <nl> + cg_printf ( " static const hashNodeCTD ctdBuckets [ ] = { \ n " ) ; <nl> + <nl> + vector < int > offsets ; <nl> + int prev = - 1 ; <nl> + for ( int n = 0 ; jt . ready ( ) ; + + n , jt . next ( ) ) { <nl> + int cur = jt . current ( ) ; <nl> + if ( prev ! = cur ) { <nl> + while ( + + prev ! = cur ) { <nl> + offsets . push_back ( - 1 ) ; <nl> + } <nl> + offsets . push_back ( n ) ; <nl> } <nl> - offsets . push_back ( n ) ; <nl> + const char * clsName = jt . key ( ) ; <nl> + StringToClassScopePtrVecMap : : const_iterator iterClasses = <nl> + classScopes . find ( clsName ) ; <nl> + ClassScopeRawPtr cls = iterClasses - > second [ 0 ] ; <nl> + cg_printf ( " { 0x % 016llXLL , % d , " , <nl> + hash_string_i ( clsName ) , jt . last ( ) ? 1 : 0 ) ; <nl> + if ( cls - > isVolatile ( ) ) { <nl> + cg_printf ( " offsetof ( GlobalVariables , CDEC ( % s ) ) " , <nl> + CodeGenerator : : FormatLabel ( cls - > getName ( ) ) . c_str ( ) ) ; <nl> + } else { <nl> + cg_printf ( " 0 " ) ; <nl> + } <nl> + cg_printf ( " , \ " % s \ " , " , CodeGenerator : : EscapeLabel ( clsName ) . c_str ( ) ) ; <nl> + if ( cls - > isRedeclaring ( ) ) { <nl> + ASSERT ( ! system ) ; <nl> + cg_printf ( " GET_CS_OFFSET ( % s ) + 3 " , <nl> + CodeGenerator : : FormatLabel ( cls - > getName ( ) ) . c_str ( ) ) ; <nl> + } else { <nl> + string clsFmt = CodeGenerator : : FormatLabel ( clsName ) ; <nl> + cg_printf ( " ( int64 ) & % s % s % s " , <nl> + Option : : ClassStaticsCallbackPrefix , <nl> + clsFmt . c_str ( ) , <nl> + cls - > isVolatile ( ) ? " + 1 " : " " ) ; <nl> + } <nl> + cg_printf ( " } , \ n " ) ; <nl> } <nl> - const char * clsName = jt . key ( ) ; <nl> - StringToClassScopePtrVecMap : : const_iterator iterClasses = <nl> - classScopes . find ( clsName ) ; <nl> - cg_printf ( " { 0x % 016llXLL , \ " % s \ " , " , <nl> - hash_string_i ( clsName ) + ( changed ? min64 : 0 ) , <nl> - CodeGenerator : : EscapeLabel ( clsName ) . c_str ( ) ) ; <nl> - ClassScopeRawPtr cls = iterClasses - > second [ 0 ] ; <nl> - if ( cls - > isRedeclaring ( ) ) { <nl> - ASSERT ( ! system ) ; <nl> - cg_printf ( " GET_CS_OFFSET ( % s ) * 2 + 1 , 0 " , <nl> - CodeGenerator : : FormatLabel ( cls - > getName ( ) ) . c_str ( ) ) ; <nl> - } else { <nl> - string clsFmt = CodeGenerator : : FormatLabel ( clsName ) ; <nl> - cg_printf ( " % s ( int64 ) & % s % s , & % s % s " , <nl> - cls - > isVolatile ( ) ? " 1 + " : " " , <nl> - Option : : ClassStaticsCallbackPrefix , <nl> - clsFmt . c_str ( ) , <nl> - Option : : CreateObjectOnlyPrefix , <nl> - clsFmt . c_str ( ) ) ; <nl> + <nl> + cg_printf ( " } ; \ n " ) ; <nl> + cg_indentBegin ( " static const int ctdMapTable [ ] = { \ n " ) ; <nl> + for ( int i = 0 , e = jt . size ( ) , s = offsets . size ( ) ; i < e ; i + + ) { <nl> + cg_printf ( " % d , " , i < s ? offsets [ i ] : - 1 ) ; <nl> + if ( ( i & 7 ) = = 7 ) cg_printf ( " \ n " ) ; <nl> } <nl> - cg_printf ( " } , \ n " ) ; <nl> - } <nl> + cg_printf ( " \ n " ) ; <nl> + cg_indentEnd ( " } ; \ n " ) ; <nl> <nl> - cg_printf ( " { - 1 , 0 , 0 , 0 } } ; \ n " ) ; <nl> - cg_indentBegin ( " static const int ctdMapTable [ ] = { \ n " ) ; <nl> - for ( int i = 0 , e = jt . size ( ) , s = offsets . size ( ) ; i < e ; i + + ) { <nl> - cg_printf ( " % d , " , i < s ? 
offsets [ i ] : - 1 ) ; <nl> - if ( ( i & 7 ) = = 7 ) cg_printf ( " \ n " ) ; <nl> + cg_printf ( text3 , jt . size ( ) - 1 ) ; <nl> } <nl> - cg_printf ( " \ n " ) ; <nl> - cg_indentEnd ( " } ; \ n " ) ; <nl> <nl> - cg_printf ( text3 , jt . size ( ) - 1 ) ; <nl> + cg_indentBegin ( " const ObjectStaticCallbacks * " <nl> + " get % s_object_static_callbacks ( CStrRef s ) { \ n " , <nl> + system ? " _builtin " : " " ) ; <nl> + if ( classes . size ( ) ) { <nl> + if ( system ) { <nl> + cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> + " if ( p ) { \ n " <nl> + " return " <nl> + " ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) ; \ n " <nl> + " } \ n " <nl> + " return NULL ; \ n " ) ; <nl> + } else { <nl> + cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> + " if ( ! p ) return get_builtin_object_static_callbacks ( s ) ; \ n " <nl> + " return getCallbacks ( p , s ) ; \ n " ) ; <nl> + } <nl> + } else { <nl> + if ( system ) { <nl> + cg_printf ( " return NULL ; \ n " ) ; <nl> + } else { <nl> + cg_printf ( " return get_builtin_object_static_callbacks ( s ) ; \ n " ) ; <nl> + } <nl> + } <nl> + cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> void ClassScope : : outputCPPClassVarInitImpl <nl> ( CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes , <nl> const vector < const char * > & classes ) { <nl> bool system = cg . getOutput ( ) = = CodeGenerator : : SystemCPP ; <nl> - if ( classes . size ( ) ) { <nl> - outputCPPHashTableClassVarInit ( cg , classScopes , classes ) ; <nl> - } <nl> + <nl> cg_indentBegin ( " Variant get % s_class_var_init ( CStrRef s , " <nl> " const char * var ) { \ n " , <nl> system ? " _builtin " : " " ) ; <nl> void ClassScope : : outputCPPClassVarInitImpl <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> if ( classes . size ( ) ) { <nl> - if ( system ) { <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( p ) { \ n " <nl> - " return " <nl> - " ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) - > os_getInit ( var ) ; \ n " <nl> - " } \ n " <nl> - " return throw_missing_class ( s ) ; \ n " ) ; <nl> - } else { <nl> - cg . printDeclareGlobals ( ) ; <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( ! p ) return get_builtin_class_var_init ( s , var ) ; \ n " <nl> - " return getCallbacks ( p , s , g ) - > os_getInit ( var ) ; \ n " ) ; <nl> - } <nl> + cg_printf ( " const ObjectStaticCallbacks * cwo = " <nl> + " get_ % sobject_static_callbacks ( s ) ; \ n " <nl> + " return LIKELY ( cwo ! = 0 ) ? " <nl> + " cwo - > os_getInit ( var ) : throw_missing_class ( s ) ; \ n " , <nl> + system ? " builtin_ " : " " ) ; <nl> } else { <nl> if ( system ) { <nl> cg_printf ( " return throw_missing_class ( s ) ; \ n " ) ; <nl> void ClassScope : : outputCPPDynamicClassCreateImpl <nl> cg_printf ( " return create_builtin_object_only_no_init ( s , root ) ; \ n " ) ; <nl> } <nl> } else { <nl> - if ( system ) { <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( p ) { \ n " <nl> - " return p - > ptr2 ( ) ; \ n " <nl> - " } \ n " <nl> - " throw_missing_class ( s ) ; \ n " <nl> - " return 0 ; \ n " ) ; <nl> - } else { <nl> - cg . printDeclareGlobals ( ) ; <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( ! 
p ) return create_builtin_object_only_no_init ( s , root ) ; \ n " <nl> - " return getCallbacks ( p , s , g ) - > createOnlyNoInit ( root ) ; \ n " ) ; <nl> - } <nl> + cg_printf ( " const ObjectStaticCallbacks * cwo = " <nl> + " get_ % sobject_static_callbacks ( s ) ; \ n " <nl> + " if ( LIKELY ( cwo ! = 0 ) ) return cwo - > createOnlyNoInit ( root ) ; \ n " <nl> + " throw_missing_class ( s ) ; \ n " <nl> + " return 0 ; \ n " , <nl> + system ? " builtin_ " : " " ) ; <nl> } <nl> cg_indentEnd ( " } \ n " ) ; <nl> / / output create_object_only ( ) <nl> void ClassScope : : outputCPPGetCallInfoStaticMethodImpl ( <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> if ( useHashTable ) { <nl> - if ( system ) { <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( StrNR ( s ) ) ; \ n " <nl> - " const ObjectStaticCallbacks * osc = p ? " <nl> - " ( const ObjectStaticCallbacks * ) p - > ptv1 : 0 ; \ n " <nl> - " return ObjectStaticCallbacks : : GetCallInfo ( osc , mcp , - 1 ) ; \ n " ) ; <nl> - } else { <nl> - cg . printDeclareGlobals ( ) ; <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( StrNR ( s ) ) ; \ n " <nl> - " if ( ! p ) return get_call_info_static_method_builtin ( mcp ) ; \ n " <nl> - " return getCallbacks ( p , s , g ) - > os_get_call_info ( mcp , - 1 ) ; \ n " ) ; <nl> - } <nl> + cg_printf ( " const ObjectStaticCallbacks * cwo = " <nl> + " get_ % sobject_static_callbacks ( s ) ; \ n " <nl> + " return ObjectStaticCallbacks : : GetCallInfo ( cwo , mcp , - 1 ) ; \ n " , <nl> + system ? " builtin_ " : " " ) ; <nl> } else { <nl> if ( system ) { <nl> cg_printf ( " return ObjectStaticCallbacks : : GetCallInfo ( 0 , mcp , - 1 ) ; \ n " ) ; <nl> void ClassScope : : outputCPPGetStaticPropertyImpl <nl> const vector < const char * > & classes ) { <nl> bool system = cg . getOutput ( ) = = CodeGenerator : : SystemCPP ; <nl> <nl> - cg_indentBegin ( " const ObjectStaticCallbacks * " <nl> - " get % s_object_static_callbacks ( CStrRef s ) { \ n " , <nl> - system ? " _builtin " : " " ) ; <nl> - if ( classes . size ( ) ) { <nl> - if ( system ) { <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( p ) { \ n " <nl> - " return " <nl> - " ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) ; \ n " <nl> - " } \ n " <nl> - " return NULL ; \ n " ) ; <nl> - } else { <nl> - cg . printDeclareGlobals ( ) ; <nl> - cg_printf ( " const hashNodeCTD * p = findCTD ( s ) ; \ n " <nl> - " if ( ! p ) return get_builtin_object_static_callbacks ( s ) ; \ n " <nl> - " return getCallbacks ( p , s , g ) ; \ n " ) ; <nl> - } <nl> - } else { <nl> - if ( system ) { <nl> - cg_printf ( " return NULL ; \ n " ) ; <nl> - } else { <nl> - cg_printf ( " return get_builtin_object_static_callbacks ( s ) ; \ n " ) ; <nl> - } <nl> - } <nl> - cg_indentEnd ( " } \ n " ) ; <nl> - <nl> cg_indentBegin ( " Variant get % s_static_property ( CStrRef s , " <nl> " const char * prop ) { \ n " , <nl> system ? " _builtin " : " " ) ; <nl> void ClassScope : : outputCPPGetStaticPropertyImpl <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> - cg . indentBegin ( " { \ n " ) ; <nl> cg . printf ( " const ObjectStaticCallbacks * cwo = " <nl> " get % s_object_static_callbacks ( s ) ; \ n " , <nl> system ? " _builtin " : " " ) ; <nl> cg . printf ( " if ( cwo ) return cwo - > os_get ( prop ) ; \ n " ) ; <nl> - cg . indentEnd ( " } \ n " ) ; <nl> <nl> - if ( ! 
system ) { <nl> - cg_printf ( " return get_builtin_static_property ( s , prop ) ; \ n " ) ; <nl> - } else { <nl> - cg_printf ( " return null ; \ n " ) ; <nl> - } <nl> + cg_printf ( " return null ; \ n " ) ; <nl> cg_indentEnd ( " } \ n " ) ; <nl> <nl> cg_indentBegin ( " Variant * get % s_static_property_lv ( CStrRef s , " <nl> void ClassScope : : outputCPPGetStaticPropertyImpl <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> - cg . indentBegin ( " { \ n " ) ; <nl> cg . printf ( " const ObjectStaticCallbacks * cwo = " <nl> " get % s_object_static_callbacks ( s ) ; \ n " , <nl> system ? " _builtin " : " " ) ; <nl> cg . printf ( " if ( cwo ) return & cwo - > os_lval ( prop ) ; \ n " ) ; <nl> - cg . indentEnd ( " } \ n " ) ; <nl> <nl> - if ( ! system ) { <nl> - cg_printf ( " return get_builtin_static_property_lv ( s , prop ) ; \ n " ) ; <nl> - } else { <nl> - cg_printf ( " return NULL ; \ n " ) ; <nl> - } <nl> + cg_printf ( " return NULL ; \ n " ) ; <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> void ClassScope : : outputCPPGetClassConstantImpl <nl> ( CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes ) { <nl> bool system = cg . getOutput ( ) = = CodeGenerator : : SystemCPP ; <nl> cg_indentBegin ( " Variant get % s_class_constant ( CStrRef s , " <nl> - " const char * constant , bool fatal / * = true * / ) { \ n " , <nl> + " const char * constant , int fatal / * = true * / ) { \ n " , <nl> system ? " _builtin " : " " ) ; <nl> if ( ! system & & Option : : EnableEval = = Option : : FullEval ) { <nl> / / See if there ' s an eval ' d version <nl> void ClassScope : : outputCPPGetClassConstantImpl <nl> cg . printf ( " if ( cwo ) return cwo - > os_constant ( constant ) ; \ n " ) ; <nl> cg . indentEnd ( " } \ n " ) ; <nl> <nl> - if ( ! system ) { <nl> - cg_printf ( " return get_builtin_class_constant ( s , constant , fatal ) ; \ n " ) ; <nl> - } else { <nl> - cg_indentBegin ( " if ( fatal ) { \ n " ) ; <nl> - cg_printf ( " raise_error ( \ " Couldn ' t find constant % % s : : % % s \ " , s . data ( ) , " <nl> - " constant ) ; \ n " ) ; <nl> - cg_indentEnd ( ) ; <nl> - cg_indentBegin ( " } else { \ n " ) ; <nl> - cg_printf ( " raise_warning ( \ " Couldn ' t find constant % % s : : % % s \ " , s . data ( ) , " <nl> - " constant ) ; \ n " ) ; <nl> - cg_indentEnd ( " } \ n " ) ; <nl> - cg_printf ( " return null ; \ n " ) ; <nl> - } <nl> + cg_indentBegin ( " if ( fatal > 0 ) { \ n " ) ; <nl> + cg_printf ( " raise_error ( \ " Couldn ' t find constant % % s : : % % s \ " , s . data ( ) , " <nl> + " constant ) ; \ n " ) ; <nl> + cg_indentEnd ( ) ; <nl> + cg_indentBegin ( " } else if ( ! fatal ) { \ n " ) ; <nl> + cg_printf ( " raise_warning ( \ " Couldn ' t find constant % % s : : % % s \ " , s . data ( ) , " <nl> + " constant ) ; \ n " ) ; <nl> + cg_indentEnd ( " } \ n " ) ; <nl> + cg_printf ( " return null ; \ n " ) ; <nl> + <nl> cg_indentEnd ( " } \ n " ) ; <nl> } <nl> <nl> mmm a / src / compiler / analysis / class_scope . h <nl> ppp b / src / compiler / analysis / class_scope . 
h <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> void serialize ( JSON : : CodeError : : OutputStream & out ) const ; <nl> void serialize ( JSON : : DocTarget : : OutputStream & out ) const ; <nl> <nl> + static void outputCPPHashTableClasses <nl> + ( CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes , <nl> + const std : : vector < const char * > & classes ) ; <nl> static void outputCPPClassVarInitImpl ( <nl> CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes , <nl> const std : : vector < const char * > & classes ) ; <nl> class ClassScope : public BlockScope , public FunctionContainer , <nl> CodeGenerator & cg , AnalysisResultPtr ar , <nl> FunctionScopePtr func , bool fewArgs ) ; <nl> <nl> - static void outputCPPHashTableClassVarInit <nl> - ( CodeGenerator & cg , const StringToClassScopePtrVecMap & classScopes , <nl> - const std : : vector < const char * > & classes ) ; <nl> - <nl> void outputCPPMethodInvokeTable <nl> ( CodeGenerator & cg , AnalysisResultPtr ar , <nl> const std : : vector < const char * > & keys , <nl> mmm a / src / hphp / externals . cpp <nl> ppp b / src / hphp / externals . cpp <nl> Variant get_constant ( CStrRef name , bool error ) { return name ; } <nl> Variant get_builtin_constant ( CStrRef name , bool error ) { return name ; } <nl> ConstantType check_constant ( CStrRef name ) { return NoneBuiltinConstant ; } <nl> Variant get_class_constant ( CStrRef s , const char * prop , <nl> - bool fatal / * = true * / ) { <nl> + int fatal / * = true * / ) { <nl> return null ; <nl> } <nl> <nl> mmm a / src / runtime / base / externals . h <nl> ppp b / src / runtime / base / externals . h <nl> extern ConstantType check_constant ( CStrRef name ) ; <nl> * Getting a class constant <nl> * / <nl> extern Variant get_class_constant ( CStrRef s , const char * prop , <nl> - bool fatal = true ) ; <nl> + int fatal = true ) ; <nl> extern Variant get_builtin_class_constant ( CStrRef s , const char * prop , <nl> - bool fatal = true ) ; <nl> + int fatal = true ) ; <nl> <nl> / * * <nl> * Getting function info <nl> mmm a / src / system / gen / sys / dynamic_table_class . cpp <nl> ppp b / src / system / gen / sys / dynamic_table_class . 
cpp <nl> const ObjectStaticCallbacks cw_SoapClient = { <nl> } ; <nl> struct hashNodeCTD { <nl> int64 hash ; <nl> + int32 flags ; <nl> + int32 cdec ; <nl> const char * name ; <nl> int64 ptv1 ; <nl> - ObjectData * ( * const ptr2 ) ( ) ; <nl> } ; <nl> static const hashNodeCTD ctdBuckets [ ] = { <nl> - { 0xCDA860E5647C3908LL , " ImageSprite " , ( int64 ) & cw_ImageSprite , & coo_ImageSprite } , <nl> - { 0xA1EF70351574EC09LL , " ReflectionFunction " , ( int64 ) & cw_ReflectionFunction , & coo_ReflectionFunction } , <nl> - { 0xC365BE9E5A85E60BLL , " DOMNotation " , ( int64 ) & cw_DOMNotation , & coo_DOMNotation } , <nl> - { 0x69EDCA1CC29CFE0BLL , " ErrorException " , ( int64 ) & cw_ErrorException , & coo_ErrorException } , <nl> - { 0xE48FB595C359F411LL , " SpoofChecker " , ( int64 ) & cw_SpoofChecker , & coo_SpoofChecker } , <nl> - { 0xF8B42582709BAF12LL , " DebuggerProxy " , ( int64 ) & cw_DebuggerProxy , & coo_DebuggerProxy } , <nl> - { 0xEA76B9AABB7CC713LL , " SimpleXMLElement " , ( int64 ) & cw_SimpleXMLElement , & coo_SimpleXMLElement } , <nl> - { 0xB2E5C767255D2515LL , " SoapFault " , ( int64 ) & cw_SoapFault , & coo_SoapFault } , <nl> - { 0xDCBA5B52C68B501ALL , " DOMEntityReference " , ( int64 ) & cw_DOMEntityReference , & coo_DOMEntityReference } , <nl> - { 0xB5A44A5E6AE2E71DLL , " ReflectionClass " , ( int64 ) & cw_ReflectionClass , & coo_ReflectionClass } , <nl> - { 0xBDB8FB455A602A1ELL , " DateTime " , ( int64 ) & cw_DateTime , & coo_DateTime } , <nl> - { 0x9C85D092180A6325LL , " XMLReader " , ( int64 ) & cw_XMLReader , & coo_XMLReader } , <nl> - { 0xFE66D362EAB5BF2BLL , " SimpleXMLElementIterator " , ( int64 ) & cw_SimpleXMLElementIterator , & coo_SimpleXMLElementIterator } , <nl> - { 0xDA1D16E68CA95F2FLL , " MutableArrayIterator " , ( int64 ) & cw_MutableArrayIterator , & coo_MutableArrayIterator } , <nl> - { 0xFA394042E7488231LL , " FilterIterator " , ( int64 ) & cw_FilterIterator , & coo_FilterIterator } , <nl> - { 0xB65899865E2EAA32LL , " RecursiveIteratorIterator " , ( int64 ) & cw_RecursiveIteratorIterator , & coo_RecursiveIteratorIterator } , <nl> - { 0xC6D7EC2E443AFA34LL , " IteratorIterator " , ( int64 ) & cw_IteratorIterator , & coo_IteratorIterator } , <nl> - { 0xF909270014376235LL , " SoapVar " , ( int64 ) & cw_SoapVar , & coo_SoapVar } , <nl> - { 0xFFF7A8442616EF37LL , " DOMNodeIterator " , ( int64 ) & cw_DOMNodeIterator , & coo_DOMNodeIterator } , <nl> - { 0xE72AD818DDE95538LL , " ReflectionException " , ( int64 ) & cw_ReflectionException , & coo_ReflectionException } , <nl> - { 0xCC6991D3CF5CA03DLL , " Collator " , ( int64 ) & cw_Collator , & coo_Collator } , <nl> - { 0x05DE30099B58533DLL , " GenericContinuation " , ( int64 ) & cw_GenericContinuation , & coo_GenericContinuation } , <nl> - { 0xC9FC1A1F7B878C3ELL , " SoapServer " , ( int64 ) & cw_SoapServer , & coo_SoapServer } , <nl> - { 0xA27C70ED5B143841LL , " OutOfBoundsException " , ( int64 ) & cw_OutOfBoundsException , & coo_OutOfBoundsException } , <nl> - { 0xC253D8488B734244LL , " PDOStatement " , ( int64 ) & cw_PDOStatement , & coo_PDOStatement } , <nl> - { 0x483F5149CA964744LL , " EncodingMatch " , ( int64 ) & cw_EncodingMatch , & coo_EncodingMatch } , <nl> - { 0x996F76C9C527B946LL , " DOMNamedNodeMap " , ( int64 ) & cw_DOMNamedNodeMap , & coo_DOMNamedNodeMap } , <nl> - { 0x66282FC8E4EF4E46LL , " BadFunctionCallException " , ( int64 ) & cw_BadFunctionCallException , & coo_BadFunctionCallException } , <nl> - { 0xDAA2EE582E0D3849LL , " UnexpectedValueException " , ( int64 ) & cw_UnexpectedValueException , & 
coo_UnexpectedValueException } , <nl> - { 0xF2A49A22C192034ALL , " Memcached " , ( int64 ) & cw_Memcached , & coo_Memcached } , <nl> - { 0xC860DC2F9B0E6D4BLL , " LogicException " , ( int64 ) & cw_LogicException , & coo_LogicException } , <nl> - { 0x964F490DAC49174DLL , " DOMText " , ( int64 ) & cw_DOMText , & coo_DOMText } , <nl> - { 0xB08D1D59986A3D4ELL , " SoapHeader " , ( int64 ) & cw_SoapHeader , & coo_SoapHeader } , <nl> - { 0xDBA243B9FBA7A64FLL , " SplObjectStorage " , ( int64 ) & cw_SplObjectStorage , & coo_SplObjectStorage } , <nl> - { 0xD2EA111229F87A50LL , " Normalizer " , ( int64 ) & cw_Normalizer , & coo_Normalizer } , <nl> - { 0xB955263C51FB4A51LL , " GeneratorClosure " , ( int64 ) & cw_GeneratorClosure , & coo_GeneratorClosure } , <nl> - { 0x82239AA5D64B1453LL , " DOMCharacterData " , ( int64 ) & cw_DOMCharacterData , & coo_DOMCharacterData } , <nl> - { 0x6AF83706F76A9D53LL , " RangeException " , ( int64 ) & cw_RangeException , & coo_RangeException } , <nl> - { 0xF754323897E8A15ELL , " DirectoryIterator " , ( int64 ) & cw_DirectoryIterator , & coo_DirectoryIterator } , <nl> - { 0xF0EBB2CC1FBF9761LL , " DOMDocumentType " , ( int64 ) & cw_DOMDocumentType , & coo_DOMDocumentType } , <nl> - { 0xF0747A09B3523662LL , " DOMNode " , ( int64 ) & cw_DOMNode , & coo_DOMNode } , <nl> - { 0xFD1235273F521A63LL , " DebuggerClient " , ( int64 ) & cw_DebuggerClient , & coo_DebuggerClient } , <nl> - { 0xAFF1EF6EBB3DA065LL , " XMLWriter " , ( int64 ) & cw_XMLWriter , & coo_XMLWriter } , <nl> - { 0xFE07B9C27FE59D68LL , " DOMComment " , ( int64 ) & cw_DOMComment , & coo_DOMComment } , <nl> - { 0x17A40D895C55D968LL , " SoapParam " , ( int64 ) & cw_SoapParam , & coo_SoapParam } , <nl> - { 0xEA5B99D5B1A4566ALL , " DOMException " , ( int64 ) & cw_DOMException , & coo_DOMException } , <nl> - { 0xBCAE91CFA2AAD16BLL , " LengthException " , ( int64 ) & cw_LengthException , & coo_LengthException } , <nl> - { 0xB280F0C292E92A6CLL , " Closure " , ( int64 ) & cw_Closure , & coo_Closure } , <nl> - { 0x8B61E0BFCFA06573LL , " ReflectionExtension " , ( int64 ) & cw_ReflectionExtension , & coo_ReflectionExtension } , <nl> - { 0x3D5870E53BF89873LL , " ArrayIterator " , ( int64 ) & cw_ArrayIterator , & coo_ArrayIterator } , <nl> - { 0x885A4F6FD393D475LL , " InvalidArgumentException " , ( int64 ) & cw_InvalidArgumentException , & coo_InvalidArgumentException } , <nl> - { 0xC1A9F7D81254DD7ALL , " ReflectionObject " , ( int64 ) & cw_ReflectionObject , & coo_ReflectionObject } , <nl> - { 0xFAD161197633B87DLL , " DOMDocumentFragment " , ( int64 ) & cw_DOMDocumentFragment , & coo_DOMDocumentFragment } , <nl> - { 0x2E899D2A2572EF7DLL , " SQLite3Result " , ( int64 ) & cw_SQLite3Result , & coo_SQLite3Result } , <nl> - { 0xD9B3EFD1CD987F80LL , " DOMProcessingInstruction " , ( int64 ) & cw_DOMProcessingInstruction , & coo_DOMProcessingInstruction } , <nl> - { 0xB3BD46E935281082LL , " ReflectionFunctionAbstract " , ( int64 ) & cw_ReflectionFunctionAbstract , & coo_ReflectionFunctionAbstract } , <nl> - { 0xA4DE53FD7D3E8383LL , " DateTimeZone " , ( int64 ) & cw_DateTimeZone , & coo_DateTimeZone } , <nl> - { 0xF0D4A29DE6A7BA84LL , " DomainException " , ( int64 ) & cw_DomainException , & coo_DomainException } , <nl> - { 0xA2500D8A3618ED87LL , " EncodingDetector " , ( int64 ) & cw_EncodingDetector , & coo_EncodingDetector } , <nl> - { 0xB4C95AF311506C8FLL , " Directory " , ( int64 ) & cw_Directory , & coo_Directory } , <nl> - { 0x86335FF83CDEA590LL , " BadMethodCallException " , ( int64 ) & cw_BadMethodCallException , & 
coo_BadMethodCallException } , <nl> - { 0xAE7081C468A05993LL , " ReflectionParameter " , ( int64 ) & cw_ReflectionParameter , & coo_ReflectionParameter } , <nl> - { 0xEFFDF8DF15CABE94LL , " DOMCDATASection " , ( int64 ) & cw_DOMCDATASection , & coo_DOMCDATASection } , <nl> - { 0x6576EACCCE24D694LL , " __PHP_Incomplete_Class " , ( int64 ) & cw___PHP_Incomplete_Class , & coo___PHP_Incomplete_Class } , <nl> - { 0x47D93E6F80B66A94LL , " Exception " , ( int64 ) & cw_Exception , & coo_Exception } , <nl> - { 0xF1089C29FE923FA7LL , " SplFileInfo " , ( int64 ) & cw_SplFileInfo , & coo_SplFileInfo } , <nl> - { 0xFD46DB2E09C6DBA8LL , " DOMXPath " , ( int64 ) & cw_DOMXPath , & coo_DOMXPath } , <nl> - { 0x9D35C3EFD00E11A9LL , " SQLite3 " , ( int64 ) & cw_SQLite3 , & coo_SQLite3 } , <nl> - { 0xE7E31D42F2DFE4AALL , " Locale " , ( int64 ) & cw_Locale , & coo_Locale } , <nl> - { 0x8A34015F67C804ADLL , " PDO " , ( int64 ) & cw_PDO , & coo_PDO } , <nl> - { 0xAE650138BA043AAFLL , " DOMDocument " , ( int64 ) & cw_DOMDocument , & coo_DOMDocument } , <nl> - { 0xC3BBC8F6F28E44B0LL , " ReflectionMethod " , ( int64 ) & cw_ReflectionMethod , & coo_ReflectionMethod } , <nl> - { 0xEDD772FA3B20B8B1LL , " DOMNodeList " , ( int64 ) & cw_DOMNodeList , & coo_DOMNodeList } , <nl> - { 0xA86D5ADE799762BALL , " FB_MySQLLexer " , ( int64 ) & cw_FB_MySQLLexer , & coo_FB_MySQLLexer } , <nl> - { 0xDD8785ACD19F77BDLL , " PDOException " , ( int64 ) & cw_PDOException , & coo_PDOException } , <nl> - { 0xC0B3C44077F5DDC3LL , " DOMElement " , ( int64 ) & cw_DOMElement , & coo_DOMElement } , <nl> - { 0xAE363D51549781C8LL , " AppendIterator " , ( int64 ) & cw_AppendIterator , & coo_AppendIterator } , <nl> - { 0xA53AC17234FC8CCBLL , " LibXMLError " , ( int64 ) & cw_LibXMLError , & coo_LibXMLError } , <nl> - { 0xBC122DF8859C72D0LL , " Memcache " , ( int64 ) & cw_Memcache , & coo_Memcache } , <nl> - { 0xD2E5CC58B841AED2LL , " SQLite3Stmt " , ( int64 ) & cw_SQLite3Stmt , & coo_SQLite3Stmt } , <nl> - { 0x297ECCC7A259EDD2LL , " SplFileObject " , ( int64 ) & cw_SplFileObject , & coo_SplFileObject } , <nl> - { 0xDE623FEAF7B068D4LL , " UnderflowException " , ( int64 ) & cw_UnderflowException , & coo_UnderflowException } , <nl> - { 0xC64D3427431A6ED8LL , " RecursiveDirectoryIterator " , ( int64 ) & cw_RecursiveDirectoryIterator , & coo_RecursiveDirectoryIterator } , <nl> - { 0x8E4CCE95F6A727D9LL , " OverflowException " , ( int64 ) & cw_OverflowException , & coo_OverflowException } , <nl> - { 0xFA52EBE538182BDALL , " SoapClient " , ( int64 ) & cw_SoapClient , & coo_SoapClient } , <nl> - { 0x9078CB3118A8B3DCLL , " RuntimeException " , ( int64 ) & cw_RuntimeException , & coo_RuntimeException } , <nl> - { 0xE1DD21ABF790E9E2LL , " Continuation " , ( int64 ) & cw_Continuation , & coo_Continuation } , <nl> - { 0xF5AA2571BDB659E4LL , " ReflectionProperty " , ( int64 ) & cw_ReflectionProperty , & coo_ReflectionProperty } , <nl> - { 0xA97174231D4912F4LL , " DOMAttr " , ( int64 ) & cw_DOMAttr , & coo_DOMAttr } , <nl> - { 0xBD69936178BA13F7LL , " stdClass " , ( int64 ) & cw_stdClass , & coo_stdClass } , <nl> - { 0xE22D4DF07E2A01FCLL , " OutOfRangeException " , ( int64 ) & cw_OutOfRangeException , & coo_OutOfRangeException } , <nl> - { 0xF4419B70A46387FDLL , " DOMImplementation " , ( int64 ) & cw_DOMImplementation , & coo_DOMImplementation } , <nl> - { 0x3D290BF933ED12FDLL , " XhprofFrame " , ( int64 ) & cw_XhprofFrame , & coo_XhprofFrame } , <nl> - { 0xE24835B2D74B86FFLL , " DOMEntity " , ( int64 ) & cw_DOMEntity , & coo_DOMEntity } , <nl> - { - 1 , 0 , 0 , 0 
} } ; <nl> + { 0x4DA860E5647C3908LL , 1 , 0 , " ImageSprite " , ( int64 ) & cw_ImageSprite } , <nl> + { 0x21EF70351574EC09LL , 1 , 0 , " ReflectionFunction " , ( int64 ) & cw_ReflectionFunction } , <nl> + { 0x4365BE9E5A85E60BLL , 0 , 0 , " DOMNotation " , ( int64 ) & cw_DOMNotation } , <nl> + { 0x69EDCA1CC29CFE0BLL , 1 , 0 , " ErrorException " , ( int64 ) & cw_ErrorException } , <nl> + { 0x648FB595C359F411LL , 1 , 0 , " SpoofChecker " , ( int64 ) & cw_SpoofChecker } , <nl> + { 0x78B42582709BAF12LL , 1 , 0 , " DebuggerProxy " , ( int64 ) & cw_DebuggerProxy } , <nl> + { 0x6A76B9AABB7CC713LL , 1 , 0 , " SimpleXMLElement " , ( int64 ) & cw_SimpleXMLElement } , <nl> + { 0x32E5C767255D2515LL , 1 , 0 , " SoapFault " , ( int64 ) & cw_SoapFault } , <nl> + { 0x5CBA5B52C68B501ALL , 1 , 0 , " DOMEntityReference " , ( int64 ) & cw_DOMEntityReference } , <nl> + { 0x35A44A5E6AE2E71DLL , 1 , 0 , " ReflectionClass " , ( int64 ) & cw_ReflectionClass } , <nl> + { 0x3DB8FB455A602A1ELL , 1 , 0 , " DateTime " , ( int64 ) & cw_DateTime } , <nl> + { 0x1C85D092180A6325LL , 1 , 0 , " XMLReader " , ( int64 ) & cw_XMLReader } , <nl> + { 0x7E66D362EAB5BF2BLL , 1 , 0 , " SimpleXMLElementIterator " , ( int64 ) & cw_SimpleXMLElementIterator } , <nl> + { 0x5A1D16E68CA95F2FLL , 1 , 0 , " MutableArrayIterator " , ( int64 ) & cw_MutableArrayIterator } , <nl> + { 0x7A394042E7488231LL , 1 , 0 , " FilterIterator " , ( int64 ) & cw_FilterIterator } , <nl> + { 0x365899865E2EAA32LL , 1 , 0 , " RecursiveIteratorIterator " , ( int64 ) & cw_RecursiveIteratorIterator } , <nl> + { 0x46D7EC2E443AFA34LL , 1 , 0 , " IteratorIterator " , ( int64 ) & cw_IteratorIterator } , <nl> + { 0x7909270014376235LL , 1 , 0 , " SoapVar " , ( int64 ) & cw_SoapVar } , <nl> + { 0x7FF7A8442616EF37LL , 1 , 0 , " DOMNodeIterator " , ( int64 ) & cw_DOMNodeIterator } , <nl> + { 0x672AD818DDE95538LL , 1 , 0 , " ReflectionException " , ( int64 ) & cw_ReflectionException } , <nl> + { 0x4C6991D3CF5CA03DLL , 0 , 0 , " Collator " , ( int64 ) & cw_Collator } , <nl> + { 0x05DE30099B58533DLL , 1 , 0 , " GenericContinuation " , ( int64 ) & cw_GenericContinuation } , <nl> + { 0x49FC1A1F7B878C3ELL , 1 , 0 , " SoapServer " , ( int64 ) & cw_SoapServer } , <nl> + { 0x227C70ED5B143841LL , 1 , 0 , " OutOfBoundsException " , ( int64 ) & cw_OutOfBoundsException } , <nl> + { 0x4253D8488B734244LL , 0 , 0 , " PDOStatement " , ( int64 ) & cw_PDOStatement } , <nl> + { 0x483F5149CA964744LL , 1 , 0 , " EncodingMatch " , ( int64 ) & cw_EncodingMatch } , <nl> + { 0x196F76C9C527B946LL , 0 , 0 , " DOMNamedNodeMap " , ( int64 ) & cw_DOMNamedNodeMap } , <nl> + { 0x66282FC8E4EF4E46LL , 1 , 0 , " BadFunctionCallException " , ( int64 ) & cw_BadFunctionCallException } , <nl> + { 0x5AA2EE582E0D3849LL , 1 , 0 , " UnexpectedValueException " , ( int64 ) & cw_UnexpectedValueException } , <nl> + { 0x72A49A22C192034ALL , 1 , 0 , " Memcached " , ( int64 ) & cw_Memcached } , <nl> + { 0x4860DC2F9B0E6D4BLL , 1 , 0 , " LogicException " , ( int64 ) & cw_LogicException } , <nl> + { 0x164F490DAC49174DLL , 1 , 0 , " DOMText " , ( int64 ) & cw_DOMText } , <nl> + { 0x308D1D59986A3D4ELL , 1 , 0 , " SoapHeader " , ( int64 ) & cw_SoapHeader } , <nl> + { 0x5BA243B9FBA7A64FLL , 1 , 0 , " SplObjectStorage " , ( int64 ) & cw_SplObjectStorage } , <nl> + { 0x52EA111229F87A50LL , 1 , 0 , " Normalizer " , ( int64 ) & cw_Normalizer } , <nl> + { 0x3955263C51FB4A51LL , 1 , 0 , " GeneratorClosure " , ( int64 ) & cw_GeneratorClosure } , <nl> + { 0x02239AA5D64B1453LL , 0 , 0 , " DOMCharacterData " , ( int64 ) & cw_DOMCharacterData 
} , <nl> + { 0x6AF83706F76A9D53LL , 1 , 0 , " RangeException " , ( int64 ) & cw_RangeException } , <nl> + { 0x7754323897E8A15ELL , 1 , 0 , " DirectoryIterator " , ( int64 ) & cw_DirectoryIterator } , <nl> + { 0x70EBB2CC1FBF9761LL , 1 , 0 , " DOMDocumentType " , ( int64 ) & cw_DOMDocumentType } , <nl> + { 0x70747A09B3523662LL , 1 , 0 , " DOMNode " , ( int64 ) & cw_DOMNode } , <nl> + { 0x7D1235273F521A63LL , 1 , 0 , " DebuggerClient " , ( int64 ) & cw_DebuggerClient } , <nl> + { 0x2FF1EF6EBB3DA065LL , 1 , 0 , " XMLWriter " , ( int64 ) & cw_XMLWriter } , <nl> + { 0x7E07B9C27FE59D68LL , 0 , 0 , " DOMComment " , ( int64 ) & cw_DOMComment } , <nl> + { 0x17A40D895C55D968LL , 1 , 0 , " SoapParam " , ( int64 ) & cw_SoapParam } , <nl> + { 0x6A5B99D5B1A4566ALL , 1 , 0 , " DOMException " , ( int64 ) & cw_DOMException } , <nl> + { 0x3CAE91CFA2AAD16BLL , 1 , 0 , " LengthException " , ( int64 ) & cw_LengthException } , <nl> + { 0x3280F0C292E92A6CLL , 1 , 0 , " Closure " , ( int64 ) & cw_Closure } , <nl> + { 0x0B61E0BFCFA06573LL , 0 , 0 , " ReflectionExtension " , ( int64 ) & cw_ReflectionExtension } , <nl> + { 0x3D5870E53BF89873LL , 1 , 0 , " ArrayIterator " , ( int64 ) & cw_ArrayIterator } , <nl> + { 0x085A4F6FD393D475LL , 1 , 0 , " InvalidArgumentException " , ( int64 ) & cw_InvalidArgumentException } , <nl> + { 0x41A9F7D81254DD7ALL , 1 , 0 , " ReflectionObject " , ( int64 ) & cw_ReflectionObject } , <nl> + { 0x7AD161197633B87DLL , 0 , 0 , " DOMDocumentFragment " , ( int64 ) & cw_DOMDocumentFragment } , <nl> + { 0x2E899D2A2572EF7DLL , 1 , 0 , " SQLite3Result " , ( int64 ) & cw_SQLite3Result } , <nl> + { 0x59B3EFD1CD987F80LL , 1 , 0 , " DOMProcessingInstruction " , ( int64 ) & cw_DOMProcessingInstruction } , <nl> + { 0x33BD46E935281082LL , 1 , 0 , " ReflectionFunctionAbstract " , ( int64 ) & cw_ReflectionFunctionAbstract } , <nl> + { 0x24DE53FD7D3E8383LL , 1 , 0 , " DateTimeZone " , ( int64 ) & cw_DateTimeZone } , <nl> + { 0x70D4A29DE6A7BA84LL , 1 , 0 , " DomainException " , ( int64 ) & cw_DomainException } , <nl> + { 0x22500D8A3618ED87LL , 1 , 0 , " EncodingDetector " , ( int64 ) & cw_EncodingDetector } , <nl> + { 0x34C95AF311506C8FLL , 1 , 0 , " Directory " , ( int64 ) & cw_Directory } , <nl> + { 0x06335FF83CDEA590LL , 1 , 0 , " BadMethodCallException " , ( int64 ) & cw_BadMethodCallException } , <nl> + { 0x2E7081C468A05993LL , 1 , 0 , " ReflectionParameter " , ( int64 ) & cw_ReflectionParameter } , <nl> + { 0x6FFDF8DF15CABE94LL , 0 , 0 , " DOMCDATASection " , ( int64 ) & cw_DOMCDATASection } , <nl> + { 0x6576EACCCE24D694LL , 0 , 0 , " __PHP_Incomplete_Class " , ( int64 ) & cw___PHP_Incomplete_Class } , <nl> + { 0x47D93E6F80B66A94LL , 1 , 0 , " Exception " , ( int64 ) & cw_Exception } , <nl> + { 0x71089C29FE923FA7LL , 1 , 0 , " SplFileInfo " , ( int64 ) & cw_SplFileInfo } , <nl> + { 0x7D46DB2E09C6DBA8LL , 1 , 0 , " DOMXPath " , ( int64 ) & cw_DOMXPath } , <nl> + { 0x1D35C3EFD00E11A9LL , 1 , 0 , " SQLite3 " , ( int64 ) & cw_SQLite3 } , <nl> + { 0x67E31D42F2DFE4AALL , 1 , 0 , " Locale " , ( int64 ) & cw_Locale } , <nl> + { 0x0A34015F67C804ADLL , 1 , 0 , " PDO " , ( int64 ) & cw_PDO } , <nl> + { 0x2E650138BA043AAFLL , 1 , 0 , " DOMDocument " , ( int64 ) & cw_DOMDocument } , <nl> + { 0x43BBC8F6F28E44B0LL , 1 , 0 , " ReflectionMethod " , ( int64 ) & cw_ReflectionMethod } , <nl> + { 0x6DD772FA3B20B8B1LL , 1 , 0 , " DOMNodeList " , ( int64 ) & cw_DOMNodeList } , <nl> + { 0x286D5ADE799762BALL , 1 , 0 , " FB_MySQLLexer " , ( int64 ) & cw_FB_MySQLLexer } , <nl> + { 0x5D8785ACD19F77BDLL , 1 , 0 , " PDOException 
" , ( int64 ) & cw_PDOException } , <nl> + { 0x40B3C44077F5DDC3LL , 1 , 0 , " DOMElement " , ( int64 ) & cw_DOMElement } , <nl> + { 0x2E363D51549781C8LL , 1 , 0 , " AppendIterator " , ( int64 ) & cw_AppendIterator } , <nl> + { 0x253AC17234FC8CCBLL , 1 , 0 , " LibXMLError " , ( int64 ) & cw_LibXMLError } , <nl> + { 0x3C122DF8859C72D0LL , 1 , 0 , " Memcache " , ( int64 ) & cw_Memcache } , <nl> + { 0x52E5CC58B841AED2LL , 0 , 0 , " SQLite3Stmt " , ( int64 ) & cw_SQLite3Stmt } , <nl> + { 0x297ECCC7A259EDD2LL , 1 , 0 , " SplFileObject " , ( int64 ) & cw_SplFileObject } , <nl> + { 0x5E623FEAF7B068D4LL , 1 , 0 , " UnderflowException " , ( int64 ) & cw_UnderflowException } , <nl> + { 0x464D3427431A6ED8LL , 1 , 0 , " RecursiveDirectoryIterator " , ( int64 ) & cw_RecursiveDirectoryIterator } , <nl> + { 0x0E4CCE95F6A727D9LL , 1 , 0 , " OverflowException " , ( int64 ) & cw_OverflowException } , <nl> + { 0x7A52EBE538182BDALL , 1 , 0 , " SoapClient " , ( int64 ) & cw_SoapClient } , <nl> + { 0x1078CB3118A8B3DCLL , 1 , 0 , " RuntimeException " , ( int64 ) & cw_RuntimeException } , <nl> + { 0x61DD21ABF790E9E2LL , 1 , 0 , " Continuation " , ( int64 ) & cw_Continuation } , <nl> + { 0x75AA2571BDB659E4LL , 1 , 0 , " ReflectionProperty " , ( int64 ) & cw_ReflectionProperty } , <nl> + { 0x297174231D4912F4LL , 1 , 0 , " DOMAttr " , ( int64 ) & cw_DOMAttr } , <nl> + { 0x3D69936178BA13F7LL , 1 , 0 , " stdClass " , ( int64 ) & cw_stdClass } , <nl> + { 0x622D4DF07E2A01FCLL , 1 , 0 , " OutOfRangeException " , ( int64 ) & cw_OutOfRangeException } , <nl> + { 0x74419B70A46387FDLL , 0 , 0 , " DOMImplementation " , ( int64 ) & cw_DOMImplementation } , <nl> + { 0x3D290BF933ED12FDLL , 1 , 0 , " XhprofFrame " , ( int64 ) & cw_XhprofFrame } , <nl> + { 0x624835B2D74B86FFLL , 1 , 0 , " DOMEntity " , ( int64 ) & cw_DOMEntity } , <nl> + } ; <nl> static const int ctdMapTable [ ] = { <nl> - 1 , - 1 , - 1 , - 1 , - 1 , - 1 , - 1 , - 1 , <nl> 0 , 1 , - 1 , 2 , - 1 , - 1 , - 1 , - 1 , <nl> findCTD ( CStrRef name ) { <nl> int o = ctdMapTable [ hash & 255 ] ; <nl> if ( UNLIKELY ( o < 0 ) ) return NULL ; <nl> const hashNodeCTD * p = & ctdBuckets [ o ] ; <nl> - int64 h = p - > hash & ( uint64 ( - 1 ) > > 1 ) ; <nl> do { <nl> + int64 h = p - > hash ; <nl> if ( h = = hash & & ( LIKELY ( p - > name = = name . data ( ) ) | | LIKELY ( ! strcasecmp ( p - > name , name . data ( ) ) ) ) ) return p ; <nl> - h = ( + + p ) - > hash ; <nl> - } while ( h > = 0 ) ; <nl> + } while ( ! ( p + + - > flags & 1 ) ) ; <nl> return NULL ; <nl> } <nl> - Variant get_builtin_class_var_init ( CStrRef s , const char * var ) { <nl> + const ObjectStaticCallbacks * get_builtin_object_static_callbacks ( CStrRef s ) { <nl> const hashNodeCTD * p = findCTD ( s ) ; <nl> if ( p ) { <nl> - return ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) - > os_getInit ( var ) ; <nl> + return ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) ; <nl> } <nl> - return throw_missing_class ( s ) ; <nl> + return NULL ; <nl> + } <nl> + Variant get_builtin_class_var_init ( CStrRef s , const char * var ) { <nl> + const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> + return LIKELY ( cwo ! = 0 ) ? 
cwo - > os_getInit ( var ) : throw_missing_class ( s ) ; <nl> } <nl> ObjectData * create_builtin_object_only_no_init ( CStrRef s , ObjectData * root / * = NULL * / ) { <nl> - const hashNodeCTD * p = findCTD ( s ) ; <nl> - if ( p ) { <nl> - return p - > ptr2 ( ) ; <nl> - } <nl> + const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> + if ( LIKELY ( cwo ! = 0 ) ) return cwo - > createOnlyNoInit ( root ) ; <nl> throw_missing_class ( s ) ; <nl> return 0 ; <nl> } <nl> Object create_builtin_object_only ( CStrRef s , ObjectData * root / * = NULL * / ) { <nl> } <nl> bool get_call_info_static_method_builtin ( MethodCallPackage & mcp ) { <nl> StringData * s ATTRIBUTE_UNUSED ( mcp . rootCls ) ; <nl> - const hashNodeCTD * p = findCTD ( StrNR ( s ) ) ; <nl> - const ObjectStaticCallbacks * osc = p ? ( const ObjectStaticCallbacks * ) p - > ptv1 : 0 ; <nl> - return ObjectStaticCallbacks : : GetCallInfo ( osc , mcp , - 1 ) ; <nl> - } <nl> - const ObjectStaticCallbacks * get_builtin_object_static_callbacks ( CStrRef s ) { <nl> - const hashNodeCTD * p = findCTD ( s ) ; <nl> - if ( p ) { <nl> - return ( ( const ObjectStaticCallbacks * ) p - > ptv1 ) ; <nl> - } <nl> - return NULL ; <nl> + const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> + return ObjectStaticCallbacks : : GetCallInfo ( cwo , mcp , - 1 ) ; <nl> } <nl> Variant get_builtin_static_property ( CStrRef s , const char * prop ) { <nl> - { <nl> - const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> - if ( cwo ) return cwo - > os_get ( prop ) ; <nl> - } <nl> + const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> + if ( cwo ) return cwo - > os_get ( prop ) ; <nl> return null ; <nl> } <nl> Variant * get_builtin_static_property_lv ( CStrRef s , const char * prop ) { <nl> - { <nl> - const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> - if ( cwo ) return & cwo - > os_lval ( prop ) ; <nl> - } <nl> + const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> + if ( cwo ) return & cwo - > os_lval ( prop ) ; <nl> return NULL ; <nl> } <nl> - Variant get_builtin_class_constant ( CStrRef s , const char * constant , bool fatal / * = true * / ) { <nl> + Variant get_builtin_class_constant ( CStrRef s , const char * constant , int fatal / * = true * / ) { <nl> { <nl> const ObjectStaticCallbacks * cwo = get_builtin_object_static_callbacks ( s ) ; <nl> if ( cwo ) return cwo - > os_constant ( constant ) ; <nl> } <nl> - if ( fatal ) { <nl> + if ( fatal > 0 ) { <nl> raise_error ( " Couldn ' t find constant % s : : % s " , s . data ( ) , constant ) ; <nl> - } else { <nl> + } else if ( ! fatal ) { <nl> raise_warning ( " Couldn ' t find constant % s : : % s " , s . data ( ) , constant ) ; <nl> } <nl> return null ; <nl> mmm a / src / test / test_externals . cpp <nl> ppp b / src / test / test_externals . cpp <nl> Variant get_constant ( CStrRef name , bool error ) { <nl> return name ; <nl> } <nl> Variant get_class_constant ( CStrRef s , const char * prop , <nl> - bool fatal / * = true * / ) { <nl> + int fatal / * = true * / ) { <nl> return null ; <nl> } <nl> <nl>
[ Perf ] Better dynamic_table_class . cpp
facebook/hhvm
db471e305c7f5eee589757513a6a6c42718a056c
2011-10-17T23:50:30Z
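The findCTD rewrite above changes how bucket chains terminate: instead of reserving the hash's top bit and masking it off on every probe, the new flags field marks the last node of a chain with bit 0. A minimal sketch of that lookup scheme, with hypothetical names and a simplified node layout rather than HHVM's actual types:

    #include <cstdint>
    #include <strings.h>  // strcasecmp is POSIX

    struct Node {
      int64_t hash;
      int32_t flags;        // bit 0 set => last node of this bucket's chain
      const char* name;
      const void* payload;  // stand-in for the ObjectStaticCallbacks pointer
    };

    // mapTable[hash & 255] holds the index of the chain's first node in the
    // contiguous bucket array, or -1 for an empty bucket.
    const Node* find(const Node* buckets, const int* mapTable,
                     int64_t hash, const char* name) {
      int o = mapTable[hash & 255];
      if (o < 0) return nullptr;
      const Node* p = &buckets[o];
      do {
        if (p->hash == hash && strcasecmp(p->name, name) == 0) return p;
      } while (!(p++->flags & 1));  // advance; stop once the end flag was set
      return nullptr;
    }

Moving the terminator out of the hash value lets the probe loop compare hashes directly, with no masking step and no sentinel row at the end of the table.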
mmm a / tensorflow / core / profiler / utils / xplane_builder . h <nl> ppp b / tensorflow / core / profiler / utils / xplane_builder . h <nl> class XStatsBuilder { <nl> } <nl> void AddStatValue ( const XStatMetadata & metadata , <nl> unsigned long value ) { / / NOLINT <nl> - if ( sizeof ( unsigned long ) = = 8 ) { / / NOLINT <nl> - AddStat ( metadata ) - > set_uint64_value ( value ) ; <nl> - } else { <nl> - AddStat ( metadata ) - > set_uint32_value ( value ) ; <nl> - } <nl> + AddStat ( metadata ) - > set_uint64_value ( value ) ; <nl> } <nl> void AddStatValue ( const XStatMetadata & metadata , <nl> unsigned long long value ) { / / NOLINT <nl> class XStatsBuilder { <nl> AddStat ( metadata ) - > set_int64_value ( value ) ; <nl> } <nl> void AddStatValue ( const XStatMetadata & metadata , long value ) { / / NOLINT <nl> - if ( sizeof ( long ) = = 8 ) { / / NOLINT <nl> - AddStat ( metadata ) - > set_int64_value ( value ) ; <nl> - } else { <nl> - AddStat ( metadata ) - > set_int32_value ( value ) ; <nl> - } <nl> + AddStat ( metadata ) - > set_int64_value ( value ) ; <nl> } <nl> void AddStatValue ( const XStatMetadata & metadata , long long value ) { / / NOLINT <nl> AddStat ( metadata ) - > set_int64_value ( value ) ; <nl>
Fixing a latent compilation error . XStat has no set_uint32_value
tensorflow/tensorflow
781b6d3fda87832300070eaa320f0420850ef797
2020-07-24T17:39:18Z
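The removed sizeof branch was only a latent error because both arms of an ordinary runtime if must type-check even when one is unreachable: on LP64 platforms the 32-bit arm was dead, yet it still named the nonexistent set_uint32_value. A sketch of two ways to satisfy the compiler; Stat here is a hypothetical stand-in, not the real XStat class:

    #include <cstdint>

    struct Stat {
      void set_int64_value(int64_t v) { v64 = v; }
      int64_t v64 = 0;
      // deliberately no set_int32_value / set_uint32_value
    };

    // The fix taken above: unconditionally widen to the 64-bit setter.
    inline void AddLong(Stat* s, long v) { s->set_int64_value(v); }  // NOLINT

    // A C++17 alternative: inside a template, `if constexpr` does not
    // instantiate the discarded arm, so the 32-bit setter would only have
    // to exist where sizeof(long) != 8.
    template <class S>
    void AddLongIfConstexpr(S* s, long v) {  // NOLINT
      if constexpr (sizeof(long) == 8) {
        s->set_int64_value(v);
      } else {
        s->set_int32_value(v);  // never instantiated on LP64 targets
      }
    }

Since a 64-bit setter accepts any narrower integer argument anyway, the unconditional widening is the simpler fix.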
mmm a / lib / Sema / TypeCheckProtocol . cpp <nl> ppp b / lib / Sema / TypeCheckProtocol . cpp <nl> void TypeChecker : : inferDefaultWitnesses ( ProtocolDecl * proto ) { <nl> DefaultWitnessChecker checker ( Context , proto ) ; <nl> <nl> / / Find the default for the given associated type . <nl> - auto findAssociatedTypeDefault = <nl> - [ & ] ( AssociatedTypeDecl * assocType , <nl> - AssociatedTypeDecl * * defaultedAssocTypeOut = nullptr ) - > Type { <nl> + auto findAssociatedTypeDefault = [ ] ( AssociatedTypeDecl * assocType ) <nl> + - > std : : pair < Type , AssociatedTypeDecl * > { <nl> auto defaultedAssocType = <nl> AssociatedTypeInference : : findDefaultedAssociatedType ( assocType ) ; <nl> if ( ! defaultedAssocType ) <nl> - return nullptr ; <nl> + return { Type ( ) , nullptr } ; <nl> <nl> Type defaultType = defaultedAssocType - > getDefaultDefinitionType ( ) ; <nl> if ( ! defaultType ) <nl> - return nullptr ; <nl> - <nl> - if ( defaultedAssocTypeOut ) <nl> - * defaultedAssocTypeOut = defaultedAssocType ; <nl> + return { Type ( ) , nullptr } ; <nl> <nl> - return defaultType ; <nl> + return { defaultType , defaultedAssocType } ; <nl> } ; <nl> <nl> for ( auto * requirement : proto - > getMembers ( ) ) { <nl> void TypeChecker : : inferDefaultWitnesses ( ProtocolDecl * proto ) { <nl> <nl> if ( auto assocType = dyn_cast < AssociatedTypeDecl > ( valueDecl ) ) { <nl> if ( assocType - > getOverriddenDecls ( ) . empty ( ) ) { <nl> - if ( Type defaultType = findAssociatedTypeDefault ( assocType ) ) <nl> + if ( Type defaultType = findAssociatedTypeDefault ( assocType ) . first ) <nl> proto - > setDefaultTypeWitness ( assocType , defaultType ) ; <nl> } <nl> <nl> void TypeChecker : : inferDefaultWitnesses ( ProtocolDecl * proto ) { <nl> } <nl> <nl> / / Dig out the default associated type definition . <nl> - AssociatedTypeDecl * defaultedAssocType = nullptr ; <nl> - Type defaultAssocType = findAssociatedTypeDefault ( assocType , <nl> - & defaultedAssocType ) ; <nl> + AssociatedTypeDecl * defaultedAssocDecl = nullptr ; <nl> + Type defaultAssocType ; <nl> + std : : tie ( defaultAssocType , defaultedAssocDecl ) = <nl> + findAssociatedTypeDefault ( assocType ) ; <nl> if ( ! defaultAssocType ) <nl> continue ; <nl> <nl> void TypeChecker : : inferDefaultWitnesses ( ProtocolDecl * proto ) { <nl> if ( conformance . isInvalid ( ) ) { <nl> / / Diagnose the lack of a conformance . This is potentially an ABI <nl> / / incompatibility . <nl> - diagnose ( proto , diag : : assoc_type_default_conformance_failed , <nl> - defaultAssocType , assocType - > getFullName ( ) , req . getFirstType ( ) , <nl> - req . getSecondType ( ) ) ; <nl> - diagnose ( defaultedAssocType , diag : : assoc_type_default_here , <nl> - assocType - > getFullName ( ) , defaultAssocType ) <nl> - . highlight ( <nl> - defaultedAssocType - > getDefaultDefinitionTypeRepr ( ) - > getSourceRange ( ) ) ; <nl> + proto - > diagnose ( diag : : assoc_type_default_conformance_failed , <nl> + defaultAssocType , assocType - > getFullName ( ) , <nl> + req . getFirstType ( ) , req . getSecondType ( ) ) ; <nl> + defaultedAssocDecl <nl> + - > diagnose ( diag : : assoc_type_default_here , assocType - > getFullName ( ) , <nl> + defaultAssocType ) <nl> + . highlight ( defaultedAssocDecl - > getDefaultDefinitionTypeRepr ( ) <nl> + - > getSourceRange ( ) ) ; <nl> <nl> continue ; <nl> } <nl>
Merge pull request from CodaFi / trompe - l - ambda
apple/swift
6172fcca297f514ae6ecfe3b06356867e1255ab3
2019-10-31T21:24:32Z
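The refactor above replaces a defaulted pointer out-parameter on a lambda with a std::pair return: callers that want both values unpack with std::tie, and callers that want only the type take .first. A toy illustration of the pattern with placeholder types, not the Swift compiler's real ones:

    #include <tuple>
    #include <utility>

    struct Type {
      bool valid = false;
      explicit operator bool() const { return valid; }
    };
    struct Decl {};

    // Return both results at once instead of writing one through a pointer.
    std::pair<Type, Decl*> findDefault(Decl* d) {
      if (!d) return {Type(), nullptr};
      return {Type{true}, d};
    }

    void callers(Decl* d) {
      // Caller that needs only the type:
      if (Type t = findDefault(d).first) { (void)t; }

      // Caller that needs both:
      Type ty;
      Decl* source = nullptr;
      std::tie(ty, source) = findDefault(d);
      (void)source;
    }

Besides reading more clearly, the pair return drops the default argument on a lambda parameter, a legal but easily misread construct.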
mmm a / hphp / hhbbc / abstract - interp . cpp <nl> ppp b / hphp / hhbbc / abstract - interp . cpp <nl> const StaticString s_empty ( " " ) ; <nl> const StaticString s_extract ( " extract " ) ; <nl> const StaticString s_Exception ( " Exception " ) ; <nl> const StaticString s_Continuation ( " Continuation " ) ; <nl> + const StaticString s_stdClass ( " stdClass " ) ; <nl> const StaticString s_unreachable ( " static analysis error : supposedly " <nl> " unreachable code was reached " ) ; <nl> <nl> bool couldBeEmptyish ( Type ty ) { <nl> ty . couldBe ( TFalse ) ; <nl> } <nl> <nl> + bool mustBeEmptyish ( Type ty ) { <nl> + return ty . subtypeOf ( TNull ) | | <nl> + ty . subtypeOf ( sval ( s_empty . get ( ) ) ) | | <nl> + ty . subtypeOf ( TFalse ) ; <nl> + } <nl> + <nl> bool elemCouldPromoteToArr ( Type ty ) { return couldBeEmptyish ( ty ) ; } <nl> bool propCouldPromoteToObj ( Type ty ) { return couldBeEmptyish ( ty ) ; } <nl> + bool elemMustPromoteToArr ( Type ty ) { return mustBeEmptyish ( ty ) ; } <nl> + bool propMustPromoteToObj ( Type ty ) { return mustBeEmptyish ( ty ) ; } <nl> <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> if ( auto const name = state . base . locName ) { <nl> auto const ty = thisPropAsCell ( name ) ; <nl> if ( ty & & propCouldPromoteToObj ( * ty ) ) { <nl> - / / Note : we could merge Obj = stdClass here , but aren ' t doing so <nl> - / / yet . <nl> - mergeThisProp ( name , TObj ) ; <nl> + mergeThisProp ( name , <nl> + objExact ( m_index . builtin_class ( m_ctx , s_stdClass . get ( ) ) ) ) ; <nl> } <nl> return ; <nl> } <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> } ) ; <nl> } <nl> <nl> - void handleInThisElemD ( MInstrState & state ) { <nl> - if ( ! couldBeInThis ( state . base ) ) return ; <nl> + void handleInSelfPropD ( MInstrState & state ) { <nl> + if ( ! couldBeInSelf ( state . base ) ) return ; <nl> <nl> if ( auto const name = state . base . locName ) { <nl> auto const ty = thisPropAsCell ( name ) ; <nl> - if ( ty & & elemCouldPromoteToArr ( * ty ) ) { <nl> - mergeThisProp ( name , TArr ) ; <nl> + if ( ty & & propCouldPromoteToObj ( * ty ) ) { <nl> + mergeSelfProp ( name , <nl> + objExact ( m_index . builtin_class ( m_ctx , s_stdClass . get ( ) ) ) ) ; <nl> } <nl> return ; <nl> } <nl> <nl> - mergeEachThisPropRaw ( [ & ] ( Type t ) { <nl> - return elemCouldPromoteToArr ( t ) ? TArr : TBottom ; <nl> - } ) ; <nl> + loseNonRefSelfPropTypes ( ) ; <nl> } <nl> <nl> - void handleInSelfPropD ( MInstrState & state ) { <nl> - if ( ! couldBeInSelf ( state . base ) ) return ; <nl> + void handleInThisElemD ( MInstrState & state ) { <nl> + if ( ! couldBeInThis ( state . base ) ) return ; <nl> <nl> if ( auto const name = state . base . locName ) { <nl> auto const ty = thisPropAsCell ( name ) ; <nl> - if ( ty & & propCouldPromoteToObj ( * ty ) ) { <nl> - / / Note : similar to handleInThisPropD , logically this could be <nl> - / / merging Obj = stdClass . <nl> - mergeSelfProp ( name , TObj ) ; <nl> + if ( ty & & elemCouldPromoteToArr ( * ty ) ) { <nl> + mergeThisProp ( name , TArr ) ; <nl> } <nl> return ; <nl> } <nl> <nl> - loseNonRefSelfPropTypes ( ) ; <nl> + mergeEachThisPropRaw ( [ & ] ( Type t ) { <nl> + return elemCouldPromoteToArr ( t ) ? 
TArr : TBottom ; <nl> + } ) ; <nl> } <nl> <nl> void handleInSelfElemD ( MInstrState & state ) { <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> / / base ops <nl> <nl> + void handleLocBasePropD ( const MInstrState & state ) { <nl> + auto const locTy = locAsCell ( state . mvec . locBase ) ; <nl> + if ( propMustPromoteToObj ( locTy ) ) { <nl> + auto const ty = objExact ( m_index . builtin_class ( m_ctx , s_stdClass . get ( ) ) ) ; <nl> + setLoc ( state . mvec . locBase , ty ) ; <nl> + return ; <nl> + } <nl> + if ( propCouldPromoteToObj ( locTy ) ) { <nl> + setLoc ( state . mvec . locBase , union_of ( locTy , TObj ) ) ; <nl> + } <nl> + } <nl> + <nl> + void handleLocBaseElemD ( const MInstrState & state ) { <nl> + auto const locTy = locAsCell ( state . mvec . locBase ) ; <nl> + if ( locTy . subtypeOf ( TArr ) | | elemMustPromoteToArr ( locTy ) ) { <nl> + / / We need to do this even if it was already an array , because <nl> + / / we may modify it if it was an SArr or SArr = . <nl> + setLoc ( state . mvec . locBase , TArr ) ; <nl> + return ; <nl> + } <nl> + if ( elemCouldPromoteToArr ( locTy ) ) { <nl> + setLoc ( state . mvec . locBase , union_of ( locTy , TArr ) ) ; <nl> + } <nl> + } <nl> + <nl> / * <nl> * Local bases can change the type of the local depending on the <nl> - * mvector , and the next dim . The current behavior here is very <nl> - * conservative . <nl> - * <nl> - * Basically for now , if we ' re about to do property dims and it ' s <nl> - * not an Obj , we give up , and if we ' re about to do elem dims and <nl> - * it ' s not an Arr or Obj , we give up . <nl> - * <nl> - * TODO ( # 3343813 ) : make this more precise . <nl> + * mvector , and the next dim . This function updates the types as <nl> + * well as calling the appropriate handler to compute effects on <nl> + * local types . <nl> * / <nl> Base miBaseLoc ( const MInstrState & state ) { <nl> auto & info = state . info ; <nl> auto & mvec = state . mvec ; <nl> bool const isDefine = info . getAttr ( mvec . lcode ) & MIA_define ; <nl> <nl> - if ( isDefine ) ensureInit ( mvec . locBase ) ; <nl> - <nl> - auto const locTy = derefLoc ( mvec . locBase ) ; <nl> if ( info . m_instr = = MI_UnsetM ) { <nl> / / Unsetting can turn static strings and arrays non - static . <nl> - auto const loose = loosen_statics ( locTy ) ; <nl> + auto const loose = loosen_statics ( derefLoc ( mvec . locBase ) ) ; <nl> setLoc ( mvec . locBase , loose ) ; <nl> return Base { loose , BaseLoc : : Frame } ; <nl> } <nl> <nl> - if ( ! isDefine ) { <nl> - return Base { locTy , BaseLoc : : Frame } ; <nl> - } <nl> + if ( ! isDefine ) return Base { derefLoc ( mvec . locBase ) , BaseLoc : : Frame } ; <nl> + <nl> + ensureInit ( mvec . locBase ) ; <nl> <nl> auto const firstDim = mvec . mcodes [ 0 ] . mcode ; <nl> if ( mcodeIsProp ( firstDim ) ) { <nl> - if ( ! locTy . subtypeOf ( TObj ) ) { <nl> - setLoc ( mvec . locBase , TInitCell ) ; <nl> - } <nl> - } <nl> - <nl> - if ( mcodeIsElem ( firstDim ) | | firstDim = = MemberCode : : MW ) { <nl> - if ( locTy . strictSubtypeOf ( TArr ) ) { <nl> - / / We ' re potentially about to mutate any constant or static <nl> - / / array , so raise it to TArr for now . <nl> - setLoc ( mvec . locBase , TArr ) ; <nl> - } else if ( ! locTy . subtypeOfAny ( TArr , TObj ) ) { <nl> - / / We ' re not handling things other than TArr and TObj subtypes <nl> - / / so far . 
<nl> - setLoc ( mvec . locBase , TInitCell ) ; <nl> - } <nl> + handleLocBasePropD ( state ) ; <nl> + } else if ( mcodeIsElem ( firstDim ) | | firstDim = = MemberCode : : MW ) { <nl> + handleLocBaseElemD ( state ) ; <nl> } <nl> <nl> return Base { locAsCell ( mvec . locBase ) , BaseLoc : : Frame } ; <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> name } ; <nl> return ; <nl> } <nl> - / / TODO ( # 3343813 ) : if it must be null , false , or " " we could use a <nl> - / / exact PostProp of Obj = stdclass to avoid future dims returning <nl> - / / couldBeThisObj . <nl> <nl> / * <nl> * Otherwise , intermediate props with define can promote a null , <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> * The base may also legitimately be an object and our next base <nl> * is in an object property . <nl> * <nl> - * We conservatively treat all these cases as " possibly " being <nl> - * inside of an object property with " PostProp " with locType TTop . <nl> + * If we know for sure we ' re promoting to stdClass , we can put the <nl> + * locType pointing at that . Otherwise we conservatively treat <nl> + * all these cases as " possibly " being inside of an object <nl> + * property with " PostProp " with locType TTop . <nl> * / <nl> - state . base = Base { TInitCell , BaseLoc : : PostProp , TTop , name } ; <nl> + auto const newBaseLocTy = <nl> + propMustPromoteToObj ( state . base . type ) <nl> + ? objExact ( m_index . builtin_class ( m_ctx , s_stdClass . get ( ) ) ) <nl> + : TTop ; <nl> + <nl> + state . base = Base { TInitCell , BaseLoc : : PostProp , newBaseLocTy , name } ; <nl> } <nl> <nl> void miElem ( MInstrState & state ) { <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> / / Final elem ops <nl> <nl> void miFinalSetElem ( MInstrState & state ) { <nl> + / / TODO ( # 3343813 ) : we should push the type of the rhs when we can ; <nl> + / / SetM has some weird cases where it pushes null instead to <nl> + / / handle . <nl> mcodeKey ( state ) ; <nl> auto const t1 = popC ( ) ; <nl> miPop ( state ) ; <nl> struct InterpStepper : boost : : static_visitor < void > { <nl> auto v = locRaw ( l ) ; <nl> if ( v . couldBe ( TUninit ) ) { <nl> if ( v . subtypeOf ( TNull ) ) return setLocRaw ( l , TInitNull ) ; <nl> - if ( v . subtypeOf ( TUnc ) ) return setLocRaw ( l , TUnc ) ; <nl> + if ( v . subtypeOf ( TUnc ) ) return setLocRaw ( l , TInitUnc ) ; <nl> if ( v . subtypeOf ( TCell ) ) return setLocRaw ( l , TInitCell ) ; <nl> if ( v . subtypeOf ( TGen ) ) return setLocRaw ( l , TInitGen ) ; <nl> } <nl> new file mode 100644 <nl> index 00000000000 . . b81f7d4b9e8 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_001 . php <nl> <nl> + < ? php <nl> + <nl> + function foo ( ) { <nl> + $ lol = new stdclass ; <nl> + $ x [ $ lol ] = 2 ; <nl> + var_dump ( $ x ) ; <nl> + } <nl> + <nl> + foo ( ) ; <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 43cbe853873 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_001 . php . expectf <nl> <nl> + HipHop Warning : Invalid operand type was used : Invalid type used as key in % s / slow / hhbbc / minstr_001 . php on line 5 <nl> + array ( 0 ) { <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . d9c95b62f44 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_002 . php <nl> <nl> + < ? 
php <nl> + <nl> + function foo ( ) { <nl> + $ x - > foo = " heh " ; <nl> + return $ x ; <nl> + } <nl> + <nl> + function bar ( ) { <nl> + var_dump ( foo ( ) ) ; <nl> + } <nl> + <nl> + bar ( ) ; <nl> + <nl> + <nl> new file mode 100644 <nl> index 00000000000 . . 7b6fa9a77fb <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_002 . php . expectf <nl> <nl> + HipHop Warning : Creating default object from empty value in % s / slow / hhbbc / minstr_002 . php on line 4 <nl> + object ( stdClass ) # 1 ( 1 ) { <nl> + [ " foo " ] = > <nl> + string ( 3 ) " heh " <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f1f21e4b338 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_004 . php <nl> <nl> + < ? hh <nl> + <nl> + class Foo { <nl> + private $ x ; <nl> + <nl> + public function get ( ) { return $ this - > x ; } <nl> + public function set ( $ y ) { $ this - > x - > x = $ y ; } <nl> + } <nl> + <nl> + function main ( ) { <nl> + $ x = new Foo ( ) ; <nl> + var_dump ( $ x ) ; <nl> + $ x - > set ( 12 ) ; <nl> + var_dump ( $ x - > get ( ) ) ; <nl> + var_dump ( $ x ) ; <nl> + } <nl> + <nl> + main ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 78b7dc6fb77 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_004 . php . expectf <nl> <nl> + object ( Foo ) # 1 ( 1 ) { <nl> + [ " x " : " Foo " : private ] = > <nl> + NULL <nl> + } <nl> + HipHop Warning : Creating default object from empty value in % s / slow / hhbbc / minstr_004 . php on line 7 <nl> + object ( stdClass ) # 2 ( 1 ) { <nl> + [ " x " ] = > <nl> + int ( 12 ) <nl> + } <nl> + object ( Foo ) # 1 ( 1 ) { <nl> + [ " x " : " Foo " : private ] = > <nl> + object ( stdClass ) # 2 ( 1 ) { <nl> + [ " x " ] = > <nl> + int ( 12 ) <nl> + } <nl> + } <nl> new file mode 100644 <nl> index 00000000000 . . f8603a7bd00 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_005 . php <nl> <nl> + < ? hh <nl> + <nl> + class SomethingUnrelated { <nl> + public $ name ; <nl> + } <nl> + <nl> + class Foo { <nl> + private $ name = " Foo " ; <nl> + <nl> + public function blah ( SomethingUnrelated $ heh ) { <nl> + $ heh - > name = 1024 ; <nl> + } <nl> + <nl> + public function getName ( ) { return $ this - > name ; } <nl> + } <nl> + <nl> + function main ( ) { <nl> + $ x = new Foo ( ) ; <nl> + $ x - > blah ( new SomethingUnrelated ) ; <nl> + var_dump ( $ x - > getName ( ) ) ; <nl> + } <nl> + main ( ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 2c9726ef436 <nl> mmm / dev / null <nl> ppp b / hphp / test / slow / hhbbc / minstr_005 . php . expect <nl> @ @ - 0 , 0 + 1 @ @ <nl> + string ( 3 ) " Foo " <nl> \ No newline at end of file <nl>
Track local base types more precisely ; use Obj = stdClass for must - promote cases
facebook/hhvm
fa63ab1b5060cb58c06b25afce9069c5e6b24e8e
2014-02-12T16:39:09Z
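The couldBe/mustBe pairing introduced above is the key lattice move: a must- predicate licenses setting the post-promotion type exactly (objExact(stdClass)), while a could- predicate only licenses a conservative union. A toy sketch of that logic with a stand-in enum in place of HHBBC's real type lattice:

    #include <cassert>

    enum class Ty { Null, False, EmptyStr, Str, Arr, Obj, InitCell };

    bool mustBeEmptyish(Ty t) {
      return t == Ty::Null || t == Ty::False || t == Ty::EmptyStr;
    }
    bool couldBeEmptyish(Ty t) {
      return mustBeEmptyish(t) || t == Ty::Str;  // a Str might be ""
    }

    // Effect of a defining property access ($x->p = ...) on the base type.
    Ty afterPropDefine(Ty base) {
      if (mustBeEmptyish(base)) return Ty::Obj;        // promoted for sure
      if (couldBeEmptyish(base)) return Ty::InitCell;  // conservative union
      return base;                                     // promotion impossible
    }

    int main() {
      assert(afterPropDefine(Ty::Null) == Ty::Obj);      // exactly stdClass
      assert(afterPropDefine(Ty::Str) == Ty::InitCell);  // may or may not promote
      assert(afterPropDefine(Ty::Obj) == Ty::Obj);       // untouched
      return 0;
    }

Pinning the must- case to an exact object type is what lets later dims on the same base avoid the pessimistic TTop locType mentioned in the miProp comment.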
mmm a / src / arch / linux / disk / aio . hpp <nl> ppp b / src / arch / linux / disk / aio . hpp <nl> class linux_diskmgr_aio_t : <nl> fd_t get_fd ( ) const { return this - > aio_fildes ; } <nl> void * get_buf ( ) const { return this - > u . c . buf ; } <nl> bool get_is_read ( ) const { return ( this - > aio_lio_opcode = = IO_CMD_PREAD ) ; } <nl> + bool get_is_write ( ) const { return ! get_is_read ( ) ; } <nl> off_t get_offset ( ) const { return this - > u . c . offset ; } <nl> size_t get_count ( ) const { return this - > u . c . nbytes ; } <nl> private : <nl> mmm a / src / arch / linux / disk / conflict_resolving . hpp <nl> ppp b / src / arch / linux / disk / conflict_resolving . hpp <nl> function that you provide . <nl> needs in order to complete the disk request . It must expose the following member <nl> functions : <nl> bool get_is_read ( ) const ; <nl> + bool get_is_write ( ) const ; <nl> off_t get_offset ( ) const ; <nl> size_t get_count ( ) const ; <nl> void * get_buf ( ) const ; <nl> struct conflict_resolving_diskmgr_t { <nl> } <nl> <nl> / * For each chunk " B " in the file : <nl> - <nl> - active_chunks [ B ] is true if there is at least one operation that is operating on <nl> - or is waiting to operate on that chunk . <nl> - <nl> - waiters [ B ] contains a deque of things that are waiting to operate on that chunk <nl> - but cannot because something else is currently operating on that chunk . It could be <nl> - a multimap instead , but that would mean depending on properties of multimaps that <nl> + chunk_queues [ B ] contains a deque of things that are either ( a ) waiting to operate on that chunk <nl> + but cannot because something else is currently operating on that chunk , or ( b ) <nl> + currently operating on that chunk . In case ( b ) , that operation <nl> + is always the first one on the deque and there can be just one such operation . <nl> + If no operation is active on B , chunk_queues does not have an entry for B . <nl> + It could be a multimap instead , but that would mean depending on properties of multimaps that <nl> are not guaranteed by the C + + standard . * / <nl> <nl> - bitset_t active_chunks ; <nl> - std : : map < int , std : : deque < action_t * > > waiters ; <nl> + std : : map < int , std : : deque < action_t * > > chunk_queues ; <nl> } ; <nl> <nl> # include " arch / linux / disk / conflict_resolving . tcc " <nl> mmm a / src / arch / linux / disk / conflict_resolving . tcc <nl> ppp b / src / arch / linux / disk / conflict_resolving . tcc <nl> <nl> + # include " perfmon . hpp " <nl> + <nl> template < class payload_t > <nl> conflict_resolving_diskmgr_t < payload_t > : : conflict_resolving_diskmgr_t ( ) { } <nl> <nl> template < class payload_t > <nl> conflict_resolving_diskmgr_t < payload_t > : : ~ conflict_resolving_diskmgr_t ( ) { <nl> <nl> / * Make sure there are no requests still out . * / <nl> - rassert ( active_chunks . count ( ) = = 0 ) ; <nl> - rassert ( waiters . empty ( ) ) ; <nl> + rassert ( chunk_queues . empty ( ) ) ; <nl> } <nl> <nl> + / / Must be defined in some specific . cc file ! <nl> + extern perfmon_sampler_t pm_io_disk_stack_conflicts ; <nl> + <nl> template < class payload_t > <nl> void conflict_resolving_diskmgr_t < payload_t > : : submit ( action_t * action ) { <nl> <nl> void conflict_resolving_diskmgr_t < payload_t > : : submit ( action_t * action ) { <nl> int start , end ; <nl> get_range ( action , & start , & end ) ; <nl> <nl> - / * Expand the bit - vector if necessary * / <nl> - if ( end > ( int ) active_chunks .
size ( ) ) active_chunks . resize ( end ) ; <nl> + / * If this is a read , we check whether there is a write from which we <nl> + can " steal " data to satisfy the read immediately . We currently only <nl> + do this if there is a single write operation that provides all the data <nl> + that we need ; we don ' t combine multiple writes that affect different parts <nl> + of the read request . * / <nl> + if ( action - > get_is_read ( ) ) { <nl> + / * The logic here is a bit tricky . What we do is the following : <nl> + First we check the queue for the first chunk . If there is a write that <nl> + can satisfy us , it must span all chunks and therefore be on the first <nl> + chunk ' s queue . We pick the latest write that exists on that queue , so <nl> + we get the most recent version of the data . We then check the other <nl> + chunks and make sure that the same write is also the latest write on <nl> + these queues . If there is another more recent write , we cannot take <nl> + the data from our initial write , because a subrange of it will be <nl> + overwritten by that other write . As an optimization , we could <nl> + still use the data from the initial write and just replace that part of <nl> + it , using the data from the other write . We leave this extension as an <nl> + exercise to the reader . * / <nl> + <nl> + action_t * latest_write = NULL ; <nl> + <nl> + typename std : : map < int , std : : deque < action_t * > > : : iterator it ; <nl> + it = chunk_queues . find ( start ) ; <nl> + if ( it ! = chunk_queues . end ( ) ) { <nl> + std : : deque < action_t * > & queue = it - > second ; <nl> + <nl> + / * Locate the latest write on the queue * / <nl> + typename std : : deque < action_t * > : : reverse_iterator qrit ; <nl> + for ( qrit = queue . rbegin ( ) ; qrit ! = queue . rend ( ) ; + + qrit ) { <nl> + if ( ( * qrit ) - > get_is_write ( ) ) { <nl> + / * We found it ! Check if it ' s of any use to us . . . <nl> + If the range it was supposed to write is a superrange of <nl> + our range , then it ' s a valid candidate . * / <nl> + if ( ( * qrit ) - > get_offset ( ) < = action - > get_offset ( ) & & <nl> + ( * qrit ) - > get_offset ( ) + ( * qrit ) - > get_count ( ) > = action - > get_offset ( ) + action - > get_count ( ) ) { <nl> + <nl> + latest_write = * qrit ; <nl> + } <nl> + <nl> + / * No other write on the queue can be the latest one . Stop looking . * / <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + / * Now check that latest_write is also latest for all other chunks . <nl> + Keep on validating as long as we have a latest_write candidate . * / <nl> + for ( int block = start ; latest_write & & block < end ; block + + ) { <nl> + <nl> + it = chunk_queues . find ( block ) ; <nl> + rassert ( it ! = chunk_queues . end ( ) ) ; / / Note : At least latest_write should be there ! <nl> + std : : deque < action_t * > & queue = it - > second ; <nl> + <nl> + / * Locate the latest write on the queue * / <nl> + typename std : : deque < action_t * > : : reverse_iterator qrit ; <nl> + for ( qrit = queue . rbegin ( ) ; qrit ! = queue . rend ( ) ; + + qrit ) { <nl> + if ( ( * qrit ) - > get_is_write ( ) ) { <nl> + <nl> + if ( * qrit ! = latest_write ) { <nl> + / * This write is more recent than latest_write , so latest_write <nl> + isn ' t actually the latest one over the whole range . <nl> + This renders it unusable for us . * / <nl> + latest_write = NULL ; <nl> + } <nl> + <nl> + / * No other write on the queue can be the latest one . Stop looking .
* / <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + <nl> + if ( latest_write ) { <nl> <nl> - / * Determine if there are conflicts * / <nl> + / * We can use the data from latest_write to fulfil the read immediately . * / <nl> + memcpy ( action - > get_buf ( ) , <nl> + ( const char * ) latest_write - > get_buf ( ) + action - > get_offset ( ) - latest_write - > get_offset ( ) , <nl> + action - > get_count ( ) ) ; <nl> + <nl> + done_fun ( action ) ; <nl> + return ; <nl> + } <nl> + } <nl> + <nl> + / * Determine if there are conflicts and put ourselves on the queues * / <nl> action - > conflict_count = 0 ; <nl> for ( int block = start ; block < end ; block + + ) { <nl> <nl> - if ( active_chunks [ block ] ) { <nl> + typename std : : map < int , std : : deque < action_t * > > : : iterator it ; <nl> + it = chunk_queues . find ( block ) ; <nl> + <nl> + if ( it ! = chunk_queues . end ( ) ) { <nl> / * We conflict on this block . * / <nl> action - > conflict_count + + ; <nl> + } <nl> <nl> - / * Put ourself on the wait list . * / <nl> - typename std : : map < int , std : : deque < action_t * > > : : iterator it ; <nl> - it = waiters . lower_bound ( block ) ; <nl> - if ( it = = waiters . end ( ) | | it - > first ! = block ) { <nl> - / * Start a queue because there isn ' t one already * / <nl> - it = waiters . insert ( it , std : : make_pair ( block , std : : deque < action_t * > ( ) ) ) ; <nl> - } <nl> - rassert ( it - > first = = block ) ; <nl> - it - > second . push_back ( action ) ; <nl> - <nl> - } else { <nl> - / * Nothing else is using this block at the moment . Claim it for ourselves . * / <nl> - active_chunks . set ( block , true ) ; <nl> + / * Put ourselves on the queue for this chunk * / <nl> + if ( it = = chunk_queues . end ( ) ) { <nl> + / * Start a queue because there isn ' t one already * / <nl> + it = chunk_queues . insert ( it , std : : make_pair ( block , std : : deque < action_t * > ( ) ) ) ; <nl> + } <nl> + rassert ( it - > first = = block ) ; <nl> + it - > second . push_back ( action ) ; <nl> } <nl> <nl> - / * If we are no conflicts , we can start right away . * / <nl> + / * If there are no conflicts , we can start right away . * / <nl> if ( action - > conflict_count = = 0 ) { <nl> payload_t * payload = action ; <nl> submit_fun ( payload ) ; <nl> + } else { <nl> + / / TODO : Refine the perfmon such that it measures the actual time that ops spend <nl> + / / in a waiting state <nl> + pm_io_disk_stack_conflicts . record ( 1 ) ; <nl> } <nl> } <nl> <nl> void conflict_resolving_diskmgr_t < payload_t > : : done ( payload_t * payload ) { <nl> <nl> int start , end ; <nl> get_range ( action , & start , & end ) ; <nl> - rassert ( end < = ( int ) active_chunks . size ( ) ) ; / / act ( ) should have expanded the bitset if necessary <nl> <nl> / * Visit every block and see if anything is blocking on us . As we iterate <nl> over block indices , we iterate through the corresponding entries in the map . * / <nl> <nl> - typename std : : map < int , std : : deque < action_t * > > : : iterator it = waiters . lower_bound ( start ) ; <nl> + typename std : : map < int , std : : deque < action_t * > > : : iterator it = chunk_queues . find ( start ) ; <nl> for ( int block = start ; block < end ; block + + ) { <nl> <nl> - rassert ( it = = waiters . end ( ) | | it - > first > = block ) ; <nl> + / * We can assert this because at least we must still be on the queue * / <nl> + rassert ( it ! = chunk_queues . end ( ) & & it - > first = = block ) ; <nl> <nl> - if ( it ! = waiters .
end ( ) & & it - > first = = block ) { <nl> + std : : deque < action_t * > & queue = it - > second ; <nl> <nl> - / * Something was blocking on us for this block . Pop the first waiter from the queue . * / <nl> - std : : deque < action_t * > & queue = it - > second ; <nl> - rassert ( ! queue . empty ( ) ) ; <nl> - action_t * waiter = queue . front ( ) ; <nl> - queue . pop_front ( ) ; <nl> + / * Remove ourselves from the queue * / <nl> + rassert ( queue . front ( ) = = action ) ; <nl> + queue . pop_front ( ) ; <nl> <nl> - / * If there are no other waiters , remove the queue ; else move past it . * / <nl> - if ( queue . empty ( ) ) waiters . erase ( it + + ) ; <nl> - else it + + ; <nl> + if ( ! queue . empty ( ) ) { <nl> + / * Continue with the next chunk queue . <nl> We have to move on now , because we might call done ( ) recursively and that might <nl> invalidate the iterator . * / <nl> + + + it ; <nl> + <nl> + / * Something was blocking on us for this block . Get the first waiter from the queue . * / <nl> + action_t * waiter = queue . front ( ) ; <nl> <nl> rassert ( waiter - > conflict_count > 0 ) ; <nl> waiter - > conflict_count - - ; <nl> void conflict_resolving_diskmgr_t < payload_t > : : done ( payload_t * payload ) { <nl> our current location , or else its conflict_count would still be nonzero . <nl> Therefore it will not touch any part of the multimap that we have not <nl> yet gotten to . If it touches the queue that we popped it from , that ' s <nl> - also safe , because we ' ve already moved our iterator past it . * / <nl> + also safe , because we ' ve already moved our iterator past it . <nl> + Note that we can potentially lose some short - circuit opportunities , <nl> + because we might be able to provide a larger range than the request <nl> + that we just provided a subset of our data to . So that request might <nl> + not be able to short - circuit another waiter , while we might have <nl> + been able to . This should be more of an academic concern though . * / <nl> done ( waiter ) ; <nl> <nl> } else { <nl> void conflict_resolving_diskmgr_t < payload_t > : : done ( payload_t * payload ) { <nl> submit_fun ( waiter_payload ) ; <nl> } <nl> } <nl> - <nl> } else { <nl> - <nl> - / * Nothing was waiting for this particular part of the file * / <nl> - active_chunks . set ( block , false ) ; <nl> + / * The queue is empty , erase it . * / <nl> + chunk_queues . erase ( it + + ) ; <nl> } <nl> } <nl> <nl> mmm a / src / arch / linux / disk / pool . hpp <nl> ppp b / src / arch / linux / disk / pool . hpp <nl> struct pool_diskmgr_t : <nl> offset = o ; <nl> } <nl> <nl> + bool get_is_write ( ) const { return ! is_read ; } <nl> bool get_is_read ( ) const { return is_read ; } <nl> fd_t get_fd ( ) const { return fd ; } <nl> void * get_buf ( ) const { return buf ; } <nl> mmm a / src / unittest / disk_conflict_resolution . cc <nl> ppp b / src / unittest / disk_conflict_resolution . cc <nl> namespace unittest { <nl> struct test_driver_t { <nl> <nl> struct core_action_t : public intrusive_list_node_t < core_action_t > { <nl> + bool get_is_write ( ) const { return ! is_read ; } <nl> bool get_is_read ( ) const { return is_read ; } <nl> void * get_buf ( ) const { return buf ; } <nl> size_t get_count ( ) const { return count ; } <nl>
Implement short - circuit serving of reads if a corresponding write is currently active or in the wait queue of the conflict - resolving disk manager .
rethinkdb/rethinkdb
e7ea48e2de79ee239ed300a17443f8de76872279
2011-06-24T23:11:18Z
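A minimal sketch of the queueing scheme this rethinkdb commit switches to, assuming a simplified action type: every chunk index maps to a FIFO of pending ops, an op's conflict_count is the number of chunks on which something was already queued ahead of it, and completion promotes the next waiter per queue. The read short-circuit (serving a read straight from a covering queued write) and the recursive completion inside done() are omitted, and all names here are illustrative, not the actual rethinkdb types.

```cpp
#include <cstddef>
#include <deque>
#include <map>
#include <vector>

// Illustrative stand-in for the disk manager's action/payload type.
struct action_t {
    bool is_write;
    size_t offset, count;   // byte range touched by this op
    int conflict_count = 0; // chunks on which someone is queued ahead of us
};

class chunk_conflict_table_t {
public:
    explicit chunk_conflict_table_t(size_t chunk_size) : chunk_size_(chunk_size) {}

    // Returns true if the action has no conflicts and can be submitted now.
    bool enqueue(action_t *a) {
        for (int b = first_chunk(a); b < end_chunk(a); ++b) {
            std::deque<action_t *> &q = queues_[b]; // created on demand
            if (!q.empty())
                ++a->conflict_count; // someone is ahead of us on this chunk
            q.push_back(a);
        }
        return a->conflict_count == 0;
    }

    // Call when `a` completes; appends every op it unblocked to *now_runnable.
    void done(action_t *a, std::vector<action_t *> *now_runnable) {
        for (int b = first_chunk(a); b < end_chunk(a); ++b) {
            auto it = queues_.find(b);
            std::deque<action_t *> &q = it->second;
            q.pop_front(); // a running op is always at the head of its queues
            if (q.empty())
                queues_.erase(it); // empty queues are erased, as in the commit
            else if (--q.front()->conflict_count == 0)
                now_runnable->push_back(q.front());
        }
    }

private:
    int first_chunk(const action_t *a) const { return int(a->offset / chunk_size_); }
    int end_chunk(const action_t *a) const {
        return int((a->offset + a->count + chunk_size_ - 1) / chunk_size_);
    }

    size_t chunk_size_;
    std::map<int, std::deque<action_t *>> queues_;
};
```

Keeping a full queue per chunk, rather than the old one-bit active_chunks set, is what makes the short-circuit possible at all: a read can scan the queues it lands on for a covering write before deciding to wait.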
mmm a / bindings / python / cntk / utils / __init__ . py <nl> ppp b / bindings / python / cntk / utils / __init__ . py <nl> def one_hot ( batch , num_classes , dtype = None , device = None ) : <nl> [ 0 . , 0 . , 0 . , 0 . , 0 . , 1 . ] ] , dtype = float32 ) , array ( [ [ 0 . , 0 . , 0 . , 0 . , 1 . , 0 . ] ] , dtype = float32 ) ] <nl> <nl> Args : <nl> - batch ( NumPy array or list ( of lists , if sequence ) of index data ) : batch input data <nl> + batch ( list of lists of integers ) : batch input data of indices <nl> num_classes ( int ) : number of classes <nl> dtype ( ` np . float32 ` , ` np . float64 ` , default None ) : data type <nl> device ( : class : ` ~ cntk . device . DeviceDescriptor ` , default None ) : device <nl> def one_hot ( batch , num_classes , dtype = None , device = None ) : <nl> if isinstance ( batch , np . ndarray ) : <nl> batch = batch . tolist ( ) <nl> <nl> + try : <nl> + data_type = type ( batch [ 0 ] [ 0 ] ) <nl> + except : <nl> + raise ValueError ( ' input must be a list of list of integers ' ) <nl> + <nl> + if data_type ! = int : <nl> + raise ValueError ( ' supplied data to one_hot ( ) must be of type integer ' <nl> + ' and not " % s " since it is index data . ' % data_type ) <nl> if dtype in [ np . float32 , None ] : <nl> value = cntk_py . Value . create_one_hot_float ( num_classes , batch , device , False ) <nl> elif dtype = = np . float64 : <nl> mmm a / bindings / python / cntk / utils / tests / utils_test . py <nl> ppp b / bindings / python / cntk / utils / tests / utils_test . py <nl> def test_sanitize_batch_sparse ( ) : <nl> ( one_hot ( [ [ 3 , 4 , 5 , 1 ] , [ 60 , 61 ] ] , num_classes = 62 ) , <nl> [ True , False ] , <nl> ValueError ) , <nl> - # [ [ 2 , 1 , 1 , 1 ] , [ 2 , 1 , 0 , 0 ] ] ) , <nl> ] ) <nl> def test_mask ( batch , seq_starts , expected ) : <nl> shape = ( ) <nl> def test_mask ( batch , seq_starts , expected ) : <nl> s = sanitize_batch ( var , batch , seq_starts ) <nl> assert np . allclose ( s . mask , expected ) <nl> <nl> + def test_one_hot ( ) : <nl> + with pytest . raises ( ValueError ) : <nl> + s = one_hot ( [ [ 1 . 0 , 2 . 0 ] , [ 3 . ] ] , 4 ) <nl> + with pytest . raises ( ValueError ) : <nl> + s = one_hot ( [ 1 , 2 ] , 4 ) <nl> + <nl> def test_sanitize_batch_contiguity ( ) : <nl> a1 = AA ( [ [ 1 , 2 ] , [ 3 , 4 ] ] ) <nl> a2 = AA ( [ [ 5 , 6 ] , [ 7 , 8 ] ] ) <nl>
Improve data type handling for one_hot
microsoft/CNTK
124d325028a16836e7796543d7232b72f4484005
2017-01-17T11:15:18Z
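The runtime check the CNTK commit adds exists because non-integral index data would silently produce wrong one-hot rows. For contrast, a hypothetical C++ analogue (not CNTK API) carries the integer constraint in the signature, so only the range check is left for runtime:

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical helper: expand sequences of class indices into one-hot rows.
std::vector<std::vector<float>> one_hot(const std::vector<std::vector<int>> &batch,
                                        std::size_t num_classes) {
    std::vector<std::vector<float>> rows;
    for (const auto &seq : batch) {
        for (int idx : seq) {
            if (idx < 0 || std::size_t(idx) >= num_classes)
                throw std::invalid_argument("one_hot(): index out of range");
            std::vector<float> row(num_classes, 0.0f);
            row[std::size_t(idx)] = 1.0f; // set the single hot position
            rows.push_back(std::move(row));
        }
    }
    return rows;
}
```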
mmm a / addons / skin . estouchy / xml / DialogPVRGroupManager . xml <nl> ppp b / addons / skin . estouchy / xml / DialogPVRGroupManager . xml <nl> <nl> < posy > 0 < / posy > <nl> < include > WindowTitleCommons < / include > <nl> < width > 1040 < / width > <nl> - < label > $ LOCALIZE [ 19143 ] - $ LOCALIZE [ 19023 ] < / label > <nl> + < label > $ LOCALIZE [ 19143 ] - $ LOCALIZE [ 19173 ] < / label > <nl> < visible > String . IsEmpty ( Window . Property ( IsRadio ) ) < / visible > <nl> < / control > <nl> < control type = " label " > <nl> <nl> < posy > 0 < / posy > <nl> < include > WindowTitleCommons < / include > <nl> < width > 1040 < / width > <nl> - < label > $ LOCALIZE [ 19143 ] - $ LOCALIZE [ 19024 ] < / label > <nl> + < label > $ LOCALIZE [ 19143 ] - $ LOCALIZE [ 19174 ] < / label > <nl> < visible > ! String . IsEmpty ( Window . Property ( IsRadio ) ) < / visible > <nl> < / control > <nl> < control type = " group " > <nl> <nl> < include > ButtonInfoDialogsCommonValues < / include > <nl> < label > 31563 < / label > <nl> < / control > <nl> + < control type = " togglebutton " id = " 34 " > <nl> + < description > TV / Radio button < / description > <nl> + < width > 200 < / width > <nl> + < include > ButtonInfoDialogsCommonValues < / include > <nl> + < label > 19174 < / label > <nl> + < altlabel > 19173 < / altlabel > <nl> + < alttexturefocus colordiffuse = " blue " border = " 15 " > dialogbutton - focus . png < / alttexturefocus > <nl> + < alttexturenofocus border = " 15 " > dialogbutton - nofocus . png < / alttexturenofocus > <nl> + < usealttexture > ! String . IsEmpty ( Window . Property ( IsRadio ) ) < / usealttexture > <nl> + < / control > <nl> < control type = " button " id = " 29 " > <nl> < description > OK button < / description > <nl> < width > 200 < / width > <nl>
[ Estouchy ] add support for radio channel groups
xbmc/xbmc
c4ade5bc30945f3da436f3197828f2db1c524929
2017-03-20T20:29:38Z
mmm a / hphp / hack / test / typecheck / async_block4 . php . exp <nl> ppp b / hphp / hack / test / typecheck / async_block4 . php . exp <nl> <nl> - File " async_block4 . php " , line 8 , characters 10 - 19 : <nl> - Invalid return type ( Typing [ 4110 ] ) <nl> - File " async_block4 . php " , line 3 , characters 31 - 36 : <nl> - This is a string <nl> - File " async_block4 . php " , line 6 , characters 12 - 13 : <nl> - It is incompatible with an int <nl> + File " async_block4 . php " , line 5 , characters 15 - 39 : <nl> + Unexpected use of async { . . . } as lambda expression ( Parsing [ 1002 ] ) <nl> mmm a / hphp / hack / test / typecheck / namespace_group_use . php <nl> ppp b / hphp / hack / test / typecheck / namespace_group_use . php <nl> <nl> < ? hh / / strict <nl> <nl> + / / TODO ( T30806984 ) : this test is broken <nl> + <nl> namespace Foo \ Bar \ Baz { <nl> function f1 ( ) : void { } <nl> function f2 ( ) : void { } <nl> mmm a / hphp / hack / test / typecheck / namespace_group_use . php . exp <nl> ppp b / hphp / hack / test / typecheck / namespace_group_use . php . exp <nl> @ @ - 1 + 1 , 2 @ @ <nl> - No errors <nl> + File " namespace_group_use . php " , line 25 , characters 29 - 29 : <nl> + Cannot use function f1 as f1 because the name is already in use ( Parsing [ 1002 ] ) <nl>
" Fix " typecheck tests
facebook/hhvm
5fb94b9d73dd497b95b92340919bcd51514dfe36
2018-06-22T23:57:24Z
mmm a / docs / html / md__support . html <nl> ppp b / docs / html / md__support . html <nl> <nl> < td > Platform : iOS < / td > < td > ? < / td > < td > ? < / td > < td > ? < / td > < td > ? < / td > < td > ? < / td > < td > ? < / td > < / tr > <nl> < tr > <nl> < td > Engine : Unity < / td > < td > ? < / td > < td > ? < / td > < td > Yes < / td > < td > ? < / td > < td > ? < / td > < td > ? < / td > < / tr > <nl> + < tr > <nl> + < td > Primary authors ( github ) < / td > < td > wvo < / td > < td > wvo < / td > < td > ( ev / js ) < / td > < td > rw < / td > < td > rw < / td > < td > ( ev ) < / td > < / tr > <nl> < / table > <nl> + < ul > <nl> + < li > ev = evolutional < / li > <nl> + < li > js = jonsimantov < / li > <nl> + < / ul > <nl> < / div > < / div > < ! - - contents - - > <nl> < / div > < ! - - doc - content - - > <nl> < ! - - Google Analytics - - > <nl> mmm a / docs / source / Support . md <nl> ppp b / docs / source / Support . md <nl> Platform : OS X | Xcode4 | ? | ? | ? | Yes | ? <nl> Platform : Android | NDK10d | Yes | ? | ? | ? | ? <nl> Platform : iOS | ? | ? | ? | ? | ? | ? <nl> Engine : Unity | ? | ? | Yes | ? | ? | ? <nl> + Primary authors ( github ) | wvo | wvo | ( ev / js ) | rw | rw | ( ev ) <nl> <nl> - <nl> + * ev = evolutional <nl> + * js = jonsimantov <nl>
Added authors to support . md
google/flatbuffers
a170b69d5d5bdc230a3ff1ed6e341e00f5969b8f
2015-08-14T21:07:54Z
mmm a / src / Core / Settings . h <nl> ppp b / src / Core / Settings . h <nl> struct Settings : public SettingsCollection < Settings > <nl> M ( SettingInt64 , os_thread_priority , 0 , " If non zero - set corresponding ' nice ' value for query processing threads . Can be used to adjust query priority for OS scheduler . " , 0 ) \ <nl> \ <nl> M ( SettingBool , log_queries , 1 , " Log requests and write the log to the system table . " , 0 ) \ <nl> - M ( SettingLogQueriesType , log_queries_min_type , QueryLogElementType : : QUERY_START , " query_log minimal type to log , possible values ( from low to high ) : QUERY_START , QUERY_FINISH , EXCEPTION_BEFORE_START , EXCEPTION_WHILE_PROCESSING . " , 0 ) \ <nl> + M ( SettingLogQueriesType , log_queries_min_type , QueryLogElementType : : QUERY_START , " Minimal type in query_log to log , possible values ( from low to high ) : QUERY_START , QUERY_FINISH , EXCEPTION_BEFORE_START , EXCEPTION_WHILE_PROCESSING . " , 0 ) \ <nl> M ( SettingUInt64 , log_queries_cut_to_length , 100000 , " If query length is greater than specified threshold ( in bytes ) , then cut query when writing to query log . Also limit length of printed query in ordinary text log . " , 0 ) \ <nl> \ <nl> M ( SettingDistributedProductMode , distributed_product_mode , DistributedProductMode : : DENY , " How are distributed subqueries performed inside IN or JOIN sections ? " , IMPORTANT ) \ <nl> struct Settings : public SettingsCollection < Settings > <nl> M ( SettingBool , enable_scalar_subquery_optimization , true , " If it is set to true , prevent scalar subqueries from ( de ) serializing large scalar values and possibly avoid running the same subquery more than once . " , 0 ) \ <nl> M ( SettingBool , optimize_trivial_count_query , true , " Process trivial ' SELECT count ( ) FROM table ' query from metadata . " , 0 ) \ <nl> M ( SettingUInt64 , mutations_sync , 0 , " Wait for synchronous execution of ALTER TABLE UPDATE / DELETE queries ( mutations ) . 0 - execute asynchronously . 1 - wait current server . 2 - wait all replicas if they exist . " , 0 ) \ <nl> - M ( SettingBool , optimize_any_input , true , " removal of any operations from Any " , 0 ) \ <nl> + M ( SettingBool , optimize_move_functions_out_of_any , true , " Move functions out of aggregate functions ' any ' , ' anyLast ' . " , 0 ) \ <nl> M ( SettingBool , optimize_arithmetic_operations_in_aggregate_functions , true , " Move arithmetic operations out of aggregation functions " , 0 ) \ <nl> M ( SettingBool , optimize_duplicate_order_by_and_distinct , true , " Remove duplicate ORDER BY and DISTINCT if it ' s possible " , 0 ) \ <nl> M ( SettingBool , optimize_if_chain_to_miltiif , false , " Replace if ( cond1 , then1 , if ( cond2 , . . . ) ) chains to multiIf . Currently it ' s not beneficial for numeric types . " , 0 ) \ <nl> mmm a / src / Interpreters / SyntaxAnalyzer . cpp <nl> ppp b / src / Interpreters / SyntaxAnalyzer . cpp <nl> GroupByKeysInfo getGroupByKeysInfo ( ASTs & group_keys ) <nl> } <nl> <nl> / / / eliminate functions of other GROUP BY keys <nl> - void optimizeGroupByFunctionKeys ( ASTSelectQuery * select_query , bool optimize_group_by_function_keys ) <nl> + void optimizeGroupByFunctionKeys ( ASTSelectQuery * select_query ) <nl> { <nl> - if ( ! optimize_group_by_function_keys ) <nl> - return ; <nl> - <nl> if ( ! 
select_query - > groupBy ( ) ) <nl> return ; <nl> <nl> void optimizeGroupByFunctionKeys ( ASTSelectQuery * select_query , bool optimize_gr <nl> } <nl> <nl> / / / Eliminates min / max / any - aggregators of functions of GROUP BY keys <nl> - void optimizeAggregateFunctionsOfGroupByKeys ( ASTSelectQuery * select_query , bool optimize_aggregators_of_group_by_keys ) <nl> + void optimizeAggregateFunctionsOfGroupByKeys ( ASTSelectQuery * select_query ) <nl> { <nl> - if ( ! optimize_aggregators_of_group_by_keys ) <nl> - return ; <nl> - <nl> if ( ! select_query - > groupBy ( ) ) <nl> return ; <nl> <nl> void optimizeOrderBy ( const ASTSelectQuery * select_query ) <nl> } <nl> <nl> / / / Optimize duplicate ORDER BY and DISTINCT <nl> - void optimizeDuplicateOrderByAndDistinct ( ASTPtr & query , bool optimize_duplicate_order_by_and_distinct , const Context & context ) <nl> + void optimizeDuplicateOrderByAndDistinct ( ASTPtr & query , const Context & context ) <nl> { <nl> - if ( optimize_duplicate_order_by_and_distinct ) <nl> - { <nl> - DuplicateOrderByVisitor : : Data order_by_data { context , false } ; <nl> - DuplicateOrderByVisitor ( order_by_data ) . visit ( query ) ; <nl> - DuplicateDistinctVisitor : : Data distinct_data { } ; <nl> - DuplicateDistinctVisitor ( distinct_data ) . visit ( query ) ; <nl> - } <nl> + DuplicateOrderByVisitor : : Data order_by_data { context , false } ; <nl> + DuplicateOrderByVisitor ( order_by_data ) . visit ( query ) ; <nl> + DuplicateDistinctVisitor : : Data distinct_data { } ; <nl> + DuplicateDistinctVisitor ( distinct_data ) . visit ( query ) ; <nl> } <nl> <nl> / / / Remove duplicate items from LIMIT BY . <nl> void optimizeArithmeticOperationsInAgr ( ASTPtr & query , bool optimize_arithmetic_ <nl> } <nl> } <nl> <nl> - void optimizeAnyInput ( ASTPtr & query , bool optimize_any_input ) <nl> + void optimizeAnyInput ( ASTPtr & query ) <nl> { <nl> - if ( optimize_any_input ) <nl> - { <nl> - / / / Removing arithmetic operations from functions <nl> - AnyInputVisitor : : Data data = { } ; <nl> - AnyInputVisitor ( data ) . visit ( query ) ; <nl> - } <nl> + / / / Removing arithmetic operations from functions <nl> + AnyInputVisitor : : Data data = { } ; <nl> + AnyInputVisitor ( data ) . visit ( query ) ; <nl> } <nl> <nl> void getArrayJoinedColumns ( ASTPtr & query , SyntaxAnalyzerResult & result , const ASTSelectQuery * select_query , <nl> SyntaxAnalyzerResultPtr SyntaxAnalyzer : : analyzeSelect ( <nl> optimizeGroupBy ( select_query , source_columns_set , context ) ; <nl> <nl> / / / GROUP BY functions of other keys elimination . <nl> - optimizeGroupByFunctionKeys ( select_query , settings . optimize_group_by_function_keys ) ; <nl> + if ( settings . optimize_group_by_function_keys ) <nl> + optimizeGroupByFunctionKeys ( select_query ) ; <nl> <nl> / / / Move all operations out of any function <nl> - optimizeAnyInput ( query , settings . optimize_any_input ) ; <nl> + if ( settings . optimize_move_functions_out_of_any ) <nl> + optimizeAnyInput ( query ) ; <nl> <nl> / / / Eliminate min / max / any aggregators of functions of GROUP BY keys <nl> - optimizeAggregateFunctionsOfGroupByKeys ( select_query , settings . optimize_aggregators_of_group_by_keys ) ; <nl> + if ( settings . optimize_aggregators_of_group_by_keys ) <nl> + optimizeAggregateFunctionsOfGroupByKeys ( select_query ) ; <nl> <nl> / / / Remove duplicate items from ORDER BY . <nl> optimizeOrderBy ( select_query ) ; <nl> <nl> / / / Remove duplicate ORDER BY and DISTINCT from subqueries . 
<nl> - optimizeDuplicateOrderByAndDistinct ( query , settings . optimize_duplicate_order_by_and_distinct , context ) ; <nl> + if ( settings . optimize_duplicate_order_by_and_distinct ) <nl> + optimizeDuplicateOrderByAndDistinct ( query , context ) ; <nl> <nl> / / / Remove duplicated elements from LIMIT BY clause . <nl> optimizeLimitBy ( select_query ) ; <nl> mmm a / tests / queries / 0_stateless / 01321_aggregate_functions_of_group_by_keys . sql <nl> ppp b / tests / queries / 0_stateless / 01321_aggregate_functions_of_group_by_keys . sql <nl> <nl> set optimize_aggregators_of_group_by_keys = 1 ; <nl> set enable_debug_queries = 1 ; <nl> - set optimize_any_input = 0 ; <nl> + set optimize_move_functions_out_of_any = 0 ; <nl> <nl> SELECT min ( number % 2 ) AS a , max ( number % 3 ) AS b FROM numbers ( 10000000 ) GROUP BY number % 2 , number % 3 ORDER BY a , b ; <nl> SELECT any ( number % 2 ) AS a , anyLast ( number % 3 ) AS b FROM numbers ( 10000000 ) GROUP BY number % 2 , number % 3 ORDER BY a , b ; <nl> mmm a / tests / queries / 0_stateless / 01322_any_input_optimize . sql <nl> ppp b / tests / queries / 0_stateless / 01322_any_input_optimize . sql <nl> <nl> - SET optimize_any_input = 1 ; <nl> + SET optimize_move_functions_out_of_any = 1 ; <nl> SET enable_debug_queries = 1 ; <nl> SELECT any ( number + number * 2 ) FROM numbers ( 3 , 10 ) ; <nl> ANALYZE SELECT any ( number + number * 2 ) FROM numbers ( 3 , 10 ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 5a57be5fbb2 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01324_settings_documentation . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + Settings description should start with capital letter <nl> new file mode 100644 <nl> index 00000000000 . . 15736f3bc83 <nl> mmm / dev / null <nl> ppp b / tests / queries / 0_stateless / 01324_settings_documentation . sql <nl> <nl> + SELECT ' Settings description should start with capital letter ' ; <nl> + SELECT name , description FROM system . settings WHERE substring ( description , 1 , 1 ) ! = upper ( substring ( description , 1 , 1 ) ) ; <nl>
Merge pull request from ClickHouse / fix - bad - code - optimizations
ClickHouse/ClickHouse
f9372bfbbbf1a5933752ee216f9ede777556b7ed
2020-06-20T13:49:23Z
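The SyntaxAnalyzer part of this ClickHouse commit is a pure control-flow refactor: instead of each optimize* function receiving its enable flag and returning early, the caller tests the setting and the pass body stays unconditional. A generic sketch of the two shapes, with hypothetical names:

```cpp
struct Query { /* placeholder for a parsed query */ };
struct Settings { bool optimize_pass_enabled = true; }; // hypothetical flag

/// Before: the enable flag travels into the pass and guards the whole body.
void optimizePassOld(Query & query, bool enabled) {
    if (!enabled)
        return;
    (void)query; /// ... rewrite query (elided in this sketch) ...
}

/// After: the pass is unconditional and the caller owns the policy decision,
/// mirroring `if (settings.optimize_move_functions_out_of_any) optimizeAnyInput(query);`.
void optimizePass(Query & query) {
    (void)query; /// ... rewrite query (elided in this sketch) ...
}

void analyze(Query & query, const Settings & settings) {
    if (settings.optimize_pass_enabled)
        optimizePass(query);
}
```

The benefit is small but real: grepping the call site shows every setting that gates a pass, and each pass can be exercised in isolation without threading flags through.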
mmm a / include / swift / AST / DiagnosticsSema . def <nl> ppp b / include / swift / AST / DiagnosticsSema . def <nl> NOTE ( override_unnecessary_IUO_use_strict , none , <nl> NOTE ( override_unnecessary_IUO_silence , none , <nl> " add parentheses to silence this warning " , ( ) ) <nl> <nl> - ERROR ( iuo_in_illegal_position , none , <nl> - " implicitly unwrapped optionals are only allowed at top level and as " <nl> - " function results " , ( ) ) <nl> - <nl> ERROR ( override_mutable_covariant_property , none , <nl> " cannot override mutable property % 0 of type % 1 with covariant type % 2 " , <nl> ( Identifier , Type , Type ) ) <nl> ERROR ( tuple_single_element , none , <nl> ERROR ( tuple_ellipsis , none , <nl> " cannot create a variadic tuple " , ( ) ) <nl> <nl> + WARNING ( implicitly_unwrapped_optional_spelling_deprecated , none , <nl> + " the spelling ' ImplicitlyUnwrappedOptional ' is deprecated " , ( ) ) <nl> + <nl> + WARNING ( implicitly_unwrapped_optional_spelling_deprecated_with_fixit , none , <nl> + " the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name " , ( ) ) <nl> + <nl> + ERROR ( iuo_in_illegal_position , none , <nl> + " implicitly unwrapped optionals are only allowed at top level and as " <nl> + " function results " , ( ) ) <nl> + <nl> / / Ownership <nl> ERROR ( invalid_ownership_type , none , <nl> " ' % select { strong | weak | unowned | unowned } 0 ' may only be applied to " <nl> mmm a / lib / Sema / TypeCheckConstraints . cpp <nl> ppp b / lib / Sema / TypeCheckConstraints . cpp <nl> bool PreCheckExpression : : walkToClosureExprPre ( ClosureExpr * closure ) { <nl> options | = TR_AllowUnspecifiedTypes ; <nl> options | = TR_AllowUnboundGenerics ; <nl> options | = TR_InExpression ; <nl> + options | = TR_AllowIUO ; <nl> bool hadParameterError = false ; <nl> <nl> GenericTypeToArchetypeResolver resolver ( closure ) ; <nl> mmm a / lib / Sema / TypeCheckDecl . cpp <nl> ppp b / lib / Sema / TypeCheckDecl . cpp <nl> static void validatePatternBindingEntry ( TypeChecker & tc , <nl> / / top - level variables in a script file are accessible from other files , <nl> / / even though the PBD is inside a TopLevelCodeDecl . <nl> TypeResolutionOptions options = TR_InExpression ; <nl> + <nl> + options | = TR_AllowIUO ; <nl> if ( binding - > getInit ( entryNumber ) ) { <nl> / / If we have an initializer , we can also have unknown types . <nl> options | = TR_AllowUnspecifiedTypes ; <nl> class DeclChecker : public DeclVisitor < DeclChecker > { <nl> GenericTypeToArchetypeResolver resolver ( SD ) ; <nl> <nl> bool isInvalid = TC . validateType ( SD - > getElementTypeLoc ( ) , SD , <nl> - TypeResolutionOptions ( ) , <nl> + TR_AllowIUO , <nl> & resolver ) ; <nl> + TypeResolutionOptions options ; <nl> + options | = TR_SubscriptParameters ; <nl> + options | = TR_AllowIUO ; <nl> + <nl> isInvalid | = TC . typeCheckParameterList ( SD - > getIndices ( ) , SD , <nl> - TR_SubscriptParameters , <nl> + options , <nl> resolver ) ; <nl> <nl> if ( isInvalid | | SD - > isInvalid ( ) ) { <nl> class DeclChecker : public DeclVisitor < DeclChecker > { <nl> GenericTypeResolver & resolver ) { <nl> bool hadError = false ; <nl> for ( auto paramList : fd - > getParameterLists ( ) ) { <nl> - hadError | = TC . typeCheckParameterList ( paramList , fd , <nl> - TypeResolutionOptions ( ) , <nl> - resolver ) ; <nl> + hadError | = <nl> + TC . 
typeCheckParameterList ( paramList , fd , TR_AllowIUO , resolver ) ; <nl> } <nl> <nl> return hadError ; <nl> class DeclChecker : public DeclVisitor < DeclChecker > { <nl> <nl> bool badType = false ; <nl> if ( ! FD - > getBodyResultTypeLoc ( ) . isNull ( ) ) { <nl> - TypeResolutionOptions options ; <nl> + TypeResolutionOptions options = TR_AllowIUO ; <nl> if ( FD - > hasDynamicSelf ( ) ) <nl> options | = TR_DynamicSelfResult ; <nl> + <nl> if ( TC . validateType ( FD - > getBodyResultTypeLoc ( ) , FD , options , <nl> & resolver ) ) { <nl> badType = true ; <nl> mmm a / lib / Sema / TypeCheckGeneric . cpp <nl> ppp b / lib / Sema / TypeCheckGeneric . cpp <nl> static bool checkGenericFuncSignature ( TypeChecker & tc , <nl> / / Check the parameter patterns . <nl> for ( auto params : func - > getParameterLists ( ) ) { <nl> / / Check the pattern . <nl> - if ( tc . typeCheckParameterList ( params , func , TypeResolutionOptions ( ) , <nl> + if ( tc . typeCheckParameterList ( params , func , TR_AllowIUO , <nl> resolver ) ) <nl> badType = true ; <nl> <nl> static bool checkGenericFuncSignature ( TypeChecker & tc , <nl> if ( auto fn = dyn_cast < FuncDecl > ( func ) ) { <nl> if ( ! fn - > getBodyResultTypeLoc ( ) . isNull ( ) ) { <nl> / / Check the result type of the function . <nl> - TypeResolutionOptions options ; <nl> + TypeResolutionOptions options = TR_AllowIUO ; <nl> if ( fn - > hasDynamicSelf ( ) ) <nl> options | = TR_DynamicSelfResult ; <nl> <nl> static bool checkGenericSubscriptSignature ( TypeChecker & tc , <nl> / / Check the indices . <nl> auto params = subscript - > getIndices ( ) ; <nl> <nl> + TypeResolutionOptions options ; <nl> + options | = TR_SubscriptParameters ; <nl> + options | = TR_AllowIUO ; <nl> + <nl> badType | = tc . typeCheckParameterList ( params , subscript , <nl> - TR_SubscriptParameters , <nl> + options , <nl> resolver ) ; <nl> <nl> / / Infer requirements from the pattern . <nl> mmm a / lib / Sema / TypeCheckType . cpp <nl> ppp b / lib / Sema / TypeCheckType . cpp <nl> resolveTopLevelIdentTypeComponent ( TypeChecker & TC , DeclContext * DC , <nl> return ErrorType : : get ( TC . Context ) ; <nl> } <nl> <nl> + / / Emit a warning about directly spelling <nl> + / / ImplicitlyUnwrappedOptional rather than using a trailing ' ! ' . <nl> + auto * IUODecl = TC . Context . getImplicitlyUnwrappedOptionalDecl ( ) ; <nl> + if ( currentDecl = = IUODecl ) { <nl> + if ( isa < GenericIdentTypeRepr > ( comp ) & & options . contains ( TR_AllowIUO ) ) { <nl> + auto * genericTyR = cast < GenericIdentTypeRepr > ( comp ) ; <nl> + assert ( genericTyR - > getGenericArgs ( ) . size ( ) = = 1 ) ; <nl> + auto * genericArgTyR = genericTyR - > getGenericArgs ( ) [ 0 ] ; <nl> + <nl> + TC . diagnose ( <nl> + comp - > getStartLoc ( ) , <nl> + diag : : implicitly_unwrapped_optional_spelling_deprecated_with_fixit ) <nl> + . fixItRemoveChars ( <nl> + genericTyR - > getStartLoc ( ) , <nl> + genericTyR - > getAngleBrackets ( ) . Start . getAdvancedLoc ( 1 ) ) <nl> + . fixItInsertAfter ( genericArgTyR - > getEndLoc ( ) , " ! " ) <nl> + . fixItRemoveChars ( <nl> + genericTyR - > getAngleBrackets ( ) . End , <nl> + genericTyR - > getAngleBrackets ( ) . End . getAdvancedLoc ( 1 ) ) ; <nl> + } else { <nl> + TC . diagnose ( comp - > getStartLoc ( ) , <nl> + diag : : implicitly_unwrapped_optional_spelling_deprecated ) ; <nl> + } <nl> + } <nl> + <nl> / / If we found nothing , complain and give ourselves a chance to recover . <nl> if ( current . 
isNull ( ) ) { <nl> / / If we ' re not allowed to complain or we couldn ' t fix the <nl> mmm a / lib / Sema / TypeChecker . h <nl> ppp b / lib / Sema / TypeChecker . h <nl> enum TypeResolutionFlags : unsigned { <nl> <nl> / / / Whether we are checking the parameter list of a subscript . <nl> TR_SubscriptParameters = 0x2000000 , <nl> + <nl> + / / / Is it okay to resolve an IUO sigil ( " ! " ) here ? <nl> + TR_AllowIUO = 0x4000000 , <nl> } ; <nl> <nl> / / / Option set describing how type resolution should work . <nl> new file mode 100644 <nl> index 000000000000 . . 6fc65258953a <nl> mmm / dev / null <nl> ppp b / test / Sema / diag_deprecated_iuo . swift <nl> <nl> + / / RUN : % target - typecheck - verify - swift <nl> + <nl> + let _ : ImplicitlyUnwrappedOptional < Int > = 1 / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 8 - 36 = } } { { 39 - 39 = ! } } { { 39 - 40 = } } <nl> + let _ : ImplicitlyUnwrappedOptional = 1 / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + <nl> + extension ImplicitlyUnwrappedOptional { } / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + <nl> + func function ( <nl> + _ : ImplicitlyUnwrappedOptional < Int > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 6 - 34 = } } { { 37 - 37 = ! } } { { 37 - 38 = } } <nl> + ) - > ImplicitlyUnwrappedOptional < Int > { / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 6 - 34 = } } { { 37 - 37 = ! } } { { 37 - 38 = } } <nl> + return 1 <nl> + } <nl> + <nl> + func genericFunction < T > ( <nl> + iuo : ImplicitlyUnwrappedOptional < T > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 8 - 36 = } } { { 37 - 37 = ! } } { { 37 - 38 = } } <nl> + ) - > ImplicitlyUnwrappedOptional < T > { / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 6 - 34 = } } { { 35 - 35 = ! } } { { 35 - 36 = } } <nl> + return iuo <nl> + } <nl> + <nl> + protocol P { <nl> + associatedtype T <nl> + associatedtype U <nl> + } <nl> + <nl> + struct S : P { <nl> + typealias T = ImplicitlyUnwrappedOptional < Int > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + typealias U = Optional < ImplicitlyUnwrappedOptional < Int > > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + <nl> + subscript ( <nl> + index : ImplicitlyUnwrappedOptional < Int > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 12 - 40 = } } { { 43 - 43 = ! } } { { 43 - 44 = } } <nl> + ) - > ImplicitlyUnwrappedOptional < Int > { / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 12 - 40 = } } { { 43 - 43 = ! } } { { 43 - 44 = } } <nl> + return index <nl> + } <nl> + } <nl> + <nl> + func generic < T : P > ( _ : T ) where T . T = = ImplicitlyUnwrappedOptional < Int > { } / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + func genericOptIUO < T : P > ( _ : T ) where T . 
U = = Optional < ImplicitlyUnwrappedOptional < Int > > { } / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + <nl> + func testClosure ( ) - > Int { <nl> + return { <nl> + ( i : ImplicitlyUnwrappedOptional < Int > ) / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 9 - 37 = } } { { 40 - 40 = ! } } { { 40 - 41 = } } <nl> + - > ImplicitlyUnwrappedOptional < Int > in / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl> + return i <nl> + } ( 1 ) <nl> + } <nl> mmm a / test / attr / attr_escaping . swift <nl> ppp b / test / attr / attr_escaping . swift <nl> func testModuloOptionalness ( ) { <nl> func setIUOClosure ( _ fn : ( ) - > Void ) { / / expected - note { { parameter ' fn ' is implicitly non - escaping } } { { 28 - 28 = @ escaping } } <nl> iuoClosure = fn / / expected - error { { assigning non - escaping parameter ' fn ' to an @ escaping closure } } <nl> } <nl> - var iuoClosureExplicit : ImplicitlyUnwrappedOptional < ( ) - > Void > <nl> + var iuoClosureExplicit : ImplicitlyUnwrappedOptional < ( ) - > Void > / / expected - warning { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } <nl> func setExplicitIUOClosure ( _ fn : ( ) - > Void ) { / / expected - note { { parameter ' fn ' is implicitly non - escaping } } { { 36 - 36 = @ escaping } } <nl> iuoClosureExplicit = fn / / expected - error { { assigning non - escaping parameter ' fn ' to an @ escaping closure } } <nl> } <nl> mmm a / test / decl / class / override . swift <nl> ppp b / test / decl / class / override . swift <nl> class IUOTestSubclass2 : IUOTestBaseClass { <nl> / / expected - note @ - 1 { { remove ' ! ' to make the parameter required } } { { 36 - 37 = } } <nl> / / expected - note @ - 2 { { add parentheses to silence this warning } } { { 27 - 27 = ( } } { { 37 - 37 = ) } } <nl> <nl> - override func oneB ( x : ImplicitlyUnwrappedOptional < AnyObject > ) { } / / expected - warning { { overriding instance method parameter of type ' AnyObject ' with implicitly unwrapped optional type ' ImplicitlyUnwrappedOptional < AnyObject > ' } } <nl> - / / expected - note @ - 1 { { add parentheses to silence this warning } } { { 25 - 25 = ( } } { { 63 - 63 = ) } } <nl> + override func oneB ( x : ImplicitlyUnwrappedOptional < AnyObject > ) { } <nl> + / / expected - warning @ - 1 { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated ; use ' ! ' after the type name } } { { 25 - 53 = } } { { 62 - 62 = ! } } { { 62 - 63 = } } <nl> + / / expected - warning @ - 2 { { overriding instance method parameter of type ' AnyObject ' with implicitly unwrapped optional type ' ImplicitlyUnwrappedOptional < AnyObject > ' } } <nl> + / / expected - note @ - 3 { { add parentheses to silence this warning } } { { 25 - 25 = ( } } { { 63 - 63 = ) } } <nl> <nl> override func oneC ( _ : AnyObject ! ) { } / / expected - warning { { overriding instance method parameter of type ' AnyObject ' with implicitly unwrapped optional type ' AnyObject ! ' } } <nl> / / expected - note @ - 1 { { remove ' ! ' to make the parameter required } } { { 34 - 35 = } } <nl> mmm a / validation - test / Sema / type_checker_crashers_fixed / rdar28048391 . swift <nl> ppp b / validation - test / Sema / type_checker_crashers_fixed / rdar28048391 . 
swift <nl> extension rdar28048391 { <nl> } <nl> <nl> extension ImplicitlyUnwrappedOptional : rdar28048391 { } <nl> + / / expected - warning @ - 1 { { the spelling ' ImplicitlyUnwrappedOptional ' is deprecated } } <nl>
Merge pull request from rudkx / warn - on - iuo - spelling
apple/swift
f53826afd9bb384990b42399ef948afae208811c
2017-10-27T03:45:38Z
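TR_AllowIUO above is one bit in TypeChecker.h's option-set enum, threaded to exactly the positions (results, parameter lists, pattern bindings) where the '!' sigil stays legal. A minimal sketch of the same flag-set pattern, assuming illustrative bit values and a toy resolver rather than Swift's actual type checker:

```cpp
#include <cstdint>
#include <iostream>
#include <string>

enum TypeResolutionFlags : uint32_t {
    TR_None                = 0,
    TR_SubscriptParameters = 1u << 0,
    TR_AllowIUO            = 1u << 1, // may "T!" appear in this position?
};

inline TypeResolutionFlags operator|(TypeResolutionFlags a, TypeResolutionFlags b) {
    return TypeResolutionFlags(uint32_t(a) | uint32_t(b));
}

// Reject the IUO sigil unless the caller opted this position in.
bool resolveType(const std::string &spelling, TypeResolutionFlags options) {
    const bool is_iuo = !spelling.empty() && spelling.back() == '!';
    if (is_iuo && !(options & TR_AllowIUO)) {
        std::cerr << "error: implicitly unwrapped optionals are only allowed "
                     "at top level and as function results\n";
        return false;
    }
    return true;
}

int main() {
    resolveType("Int!", TR_AllowIUO);                          // result position: ok
    resolveType("Int!", TR_SubscriptParameters | TR_AllowIUO); // subscript index: ok
    resolveType("Int!", TR_None);                              // diagnosed
    return 0;
}
```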
mmm a / fdbserver / masterserver . actor . cpp <nl> ppp b / fdbserver / masterserver . actor . cpp <nl> ACTOR Future < Void > trackTlogRecovery ( Reference < MasterData > self , Reference < Asyn <nl> ACTOR Future < Void > masterCore ( Reference < MasterData > self , PromiseStream < Future < Void > > addActor ) <nl> { <nl> state TraceInterval recoveryInterval ( " MasterRecovery " ) ; <nl> + state double recoverStartTime = now ( ) ; <nl> <nl> addActor . send ( waitFailureServer ( self - > myInterface . waitFailure . getFuture ( ) ) ) ; <nl> <nl> ACTOR Future < Void > masterCore ( Reference < MasterData > self , PromiseStream < Future < <nl> TraceEvent ( recoveryInterval . end ( ) , self - > dbgid ) . detail ( " RecoveryTransactionVersion " , self - > recoveryTransactionVersion ) ; <nl> <nl> self - > recoveryState = RecoveryState : : FULLY_RECOVERED ; <nl> + double recoveryDuration = now ( ) - recoverStartTime ; <nl> + <nl> + TraceEvent ( recoveryDuration > 4 ? SevWarnAlways : SevInfo , " MasterRecoveryDuration " , self - > dbgid ) <nl> + . detail ( " recoveryDuration " , recoveryDuration ) <nl> + . trackLatest ( " MasterRecoveryDuration " ) ; <nl> + <nl> TraceEvent ( " MasterRecoveryState " , self - > dbgid ) <nl> . detail ( " StatusCode " , RecoveryStatus : : fully_recovered ) <nl> . detail ( " Status " , RecoveryStatus : : names [ RecoveryStatus : : fully_recovered ] ) <nl> . detail ( " storeType " , self - > configuration . storageServerStoreType ) <nl> + . detail ( " recoveryDuration " , recoveryDuration ) <nl> . trackLatest ( " MasterRecoveryState " ) ; <nl> <nl> / / Now that recovery is complete , we register ourselves with the cluster controller , so that the client and server information <nl>
added a new trace event tracking master recovery durations
apple/foundationdb
e113dba0e397567deba4f690d2a6eeb09535f029
2017-11-15T20:38:26Z
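The foundationdb commit's pattern (stamp the start time, compute the duration when recovery finishes, escalate the trace severity past a threshold) is easy to lift out. A standalone sketch, with flow's simulated now() replaced by steady_clock and printf standing in for TraceEvent; the 4-second cutoff is the commit's, everything else is illustrative:

```cpp
#include <chrono>
#include <cstdio>

struct recovery_timer_t {
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();

    void finish() const {
        const double secs = std::chrono::duration<double>(
                                std::chrono::steady_clock::now() - start).count();
        // Same escalation rule as masterCore(): slow recoveries log loudly.
        const char *sev = secs > 4.0 ? "SevWarnAlways" : "SevInfo";
        std::printf("[%s] MasterRecoveryDuration recoveryDuration=%.3f\n", sev, secs);
    }
};

int main() {
    recovery_timer_t timer;
    // ... recovery work would run here ...
    timer.finish();
    return 0;
}
```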
mmm a / src / Makefile . am <nl> ppp b / src / Makefile . am <nl> BITCOIN_CORE_H = \ <nl> net . h \ <nl> netaddress . h \ <nl> netbase . h \ <nl> + netmessagemaker . h \ <nl> noui . h \ <nl> policy / fees . h \ <nl> policy / policy . h \ <nl> mmm a / src / net . h <nl> ppp b / src / net . h <nl> class CTransaction ; <nl> class CNodeStats ; <nl> class CClientUIInterface ; <nl> <nl> + struct CSerializedNetMsg <nl> + { <nl> + CSerializedNetMsg ( ) = default ; <nl> + CSerializedNetMsg ( CSerializedNetMsg & & ) = default ; <nl> + CSerializedNetMsg & operator = ( CSerializedNetMsg & & ) = default ; <nl> + / / No copying , only moves . <nl> + CSerializedNetMsg ( const CSerializedNetMsg & msg ) = delete ; <nl> + CSerializedNetMsg & operator = ( const CSerializedNetMsg & ) = delete ; <nl> + <nl> + std : : vector < unsigned char > data ; <nl> + std : : string command ; <nl> + } ; <nl> + <nl> + <nl> class CConnman <nl> { <nl> public : <nl> new file mode 100644 <nl> index 000000000000 . . 7167434a19eb <nl> mmm / dev / null <nl> ppp b / src / netmessagemaker . h <nl> <nl> + / / Copyright ( c ) 2009 - 2010 Satoshi Nakamoto <nl> + / / Copyright ( c ) 2009 - 2016 The Bitcoin Core developers <nl> + / / Distributed under the MIT software license , see the accompanying <nl> + / / file COPYING or http : / / www . opensource . org / licenses / mit - license . php . <nl> + <nl> + # ifndef BITCOIN_NETMESSAGEMAKER_H <nl> + # define BITCOIN_NETMESSAGEMAKER_H <nl> + <nl> + # include " net . h " <nl> + # include " serialize . h " <nl> + <nl> + class CNetMsgMaker <nl> + { <nl> + public : <nl> + CNetMsgMaker ( int nVersionIn ) : nVersion ( nVersionIn ) { } <nl> + <nl> + template < typename . . . Args > <nl> + CSerializedNetMsg Make ( int nFlags , std : : string sCommand , Args & & . . . args ) <nl> + { <nl> + CSerializedNetMsg msg ; <nl> + msg . command = std : : move ( sCommand ) ; <nl> + CVectorWriter { SER_NETWORK , nFlags | nVersion , msg . data , 0 , std : : forward < Args > ( args ) . . . } ; <nl> + return msg ; <nl> + } <nl> + <nl> + template < typename . . . Args > <nl> + CSerializedNetMsg Make ( std : : string sCommand , Args & & . . . args ) <nl> + { <nl> + return Make ( 0 , std : : move ( sCommand ) , std : : forward < Args > ( args ) . . . ) ; <nl> + } <nl> + <nl> + private : <nl> + const int nVersion ; <nl> + } ; <nl> + <nl> + # endif / / BITCOIN_NETMESSAGEMAKER_H <nl> mmm a / src / streams . h <nl> ppp b / src / streams . h <nl> OverrideStream < S > WithOrVersion ( S * s , int nVersionFlag ) <nl> return OverrideStream < S > ( s , s - > GetType ( ) , s - > GetVersion ( ) | nVersionFlag ) ; <nl> } <nl> <nl> + / * Minimal stream for overwriting and / or appending to an existing byte vector <nl> + * <nl> + * The referenced vector will grow as necessary <nl> + * / <nl> + class CVectorWriter <nl> + { <nl> + public : <nl> + <nl> + / * <nl> + * @ param [ in ] nTypeIn Serialization Type <nl> + * @ param [ in ] nVersionIn Serialization Version ( including any flags ) <nl> + * @ param [ in ] vchDataIn Referenced byte vector to overwrite / append <nl> + * @ param [ in ] nPosIn Starting position . Vector index where writes should start . The vector will initially <nl> + * grow as necessary to max ( index , vec . size ( ) ) . So to append , use vec . size ( ) . <nl> + * / <nl> + CVectorWriter ( int nTypeIn , int nVersionIn , std : : vector < unsigned char > & vchDataIn , size_t nPosIn ) : nType ( nTypeIn ) , nVersion ( nVersionIn ) , vchData ( vchDataIn ) , nPos ( nPosIn ) <nl> + { <nl> + if ( nPos > vchData . 
size ( ) ) <nl> + vchData . resize ( nPos ) ; <nl> + } <nl> + / * <nl> + * ( other params same as above ) <nl> + * @ param [ in ] args A list of items to serialize starting at nPos . <nl> + * / <nl> + template < typename . . . Args > <nl> + CVectorWriter ( int nTypeIn , int nVersionIn , std : : vector < unsigned char > & vchDataIn , size_t nPosIn , Args & & . . . args ) : CVectorWriter ( nTypeIn , nVersionIn , vchDataIn , nPosIn ) <nl> + { <nl> + : : SerializeMany ( * this , std : : forward < Args > ( args ) . . . ) ; <nl> + } <nl> + void write ( const char * pch , size_t nSize ) <nl> + { <nl> + assert ( nPos < = vchData . size ( ) ) ; <nl> + size_t nOverwrite = std : : min ( nSize , vchData . size ( ) - nPos ) ; <nl> + if ( nOverwrite ) { <nl> + memcpy ( vchData . data ( ) + nPos , reinterpret_cast < const unsigned char * > ( pch ) , nOverwrite ) ; <nl> + } <nl> + if ( nOverwrite < nSize ) { <nl> + vchData . insert ( vchData . end ( ) , reinterpret_cast < const unsigned char * > ( pch ) + nOverwrite , reinterpret_cast < const unsigned char * > ( pch ) + nSize ) ; <nl> + } <nl> + nPos + = nSize ; <nl> + } <nl> + template < typename T > <nl> + CVectorWriter & operator < < ( const T & obj ) <nl> + { <nl> + / / Serialize to this stream <nl> + : : Serialize ( * this , obj ) ; <nl> + return ( * this ) ; <nl> + } <nl> + int GetVersion ( ) const <nl> + { <nl> + return nVersion ; <nl> + } <nl> + int GetType ( ) const <nl> + { <nl> + return nType ; <nl> + } <nl> + void seek ( size_t nSize ) <nl> + { <nl> + nPos + = nSize ; <nl> + if ( nPos > vchData . size ( ) ) <nl> + vchData . resize ( nPos ) ; <nl> + } <nl> + private : <nl> + const int nType ; <nl> + const int nVersion ; <nl> + std : : vector < unsigned char > & vchData ; <nl> + size_t nPos ; <nl> + } ; <nl> + <nl> / * * Double ended buffer combining vector and stream - like interfaces . <nl> * <nl> * > > and < < read and write unformatted data using the above serialization templates . <nl> mmm a / src / test / streams_tests . cpp <nl> ppp b / src / test / streams_tests . cpp <nl> using namespace boost : : assign ; / / bring ' operator + = ( ) ' into scope <nl> <nl> BOOST_FIXTURE_TEST_SUITE ( streams_tests , BasicTestingSetup ) <nl> <nl> + BOOST_AUTO_TEST_CASE ( streams_vector_writer ) <nl> + { <nl> + unsigned char a ( 1 ) ; <nl> + unsigned char b ( 2 ) ; <nl> + unsigned char bytes [ ] = { 3 , 4 , 5 , 6 } ; <nl> + std : : vector < unsigned char > vch ; <nl> + <nl> + / / Each test runs twice . Serializing a second time at the same starting <nl> + / / point should yield the same results , even if the first test grew the <nl> + / / vector . <nl> + <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 0 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 1 , 2 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 0 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 1 , 2 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 1 , 2 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 1 , 2 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + vch . 
resize ( 5 , 0 ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 1 , 2 , 0 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 1 , 2 , 0 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + vch . resize ( 4 , 0 ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 3 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 0 , 1 , 2 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 3 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 0 , 1 , 2 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + vch . resize ( 4 , 0 ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 4 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 0 , 0 , 1 , 2 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 4 , a , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 0 , 0 , 0 , 0 , 1 , 2 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 0 , FLATDATA ( bytes ) ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 3 , 4 , 5 , 6 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 0 , FLATDATA ( bytes ) ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 3 , 4 , 5 , 6 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + <nl> + vch . resize ( 4 , 8 ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , FLATDATA ( bytes ) , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 8 , 8 , 1 , 3 , 4 , 5 , 6 , 2 } } ) ) ; <nl> + CVectorWriter ( SER_NETWORK , INIT_PROTO_VERSION , vch , 2 , a , FLATDATA ( bytes ) , b ) ; <nl> + BOOST_CHECK ( ( vch = = std : : vector < unsigned char > { { 8 , 8 , 1 , 3 , 4 , 5 , 6 , 2 } } ) ) ; <nl> + vch . clear ( ) ; <nl> + } <nl> + <nl> BOOST_AUTO_TEST_CASE ( streams_serializedata_xor ) <nl> { <nl> std : : vector < char > in ; <nl>
net : add CVectorWriter and CNetMsgMaker
bitcoin/bitcoin
2ec935dcaab9557addcf73c33aa7f2db8cc01fee
2016-11-25T17:09:58Z
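Because the full CVectorWriter definition is in the streams.h hunk above, its overwrite-then-append contract can be shown directly. A usage sketch mirroring the new streams_tests.cpp cases; it assumes the commit's streams.h/serialize.h are available for CVectorWriter, SER_NETWORK, and INIT_PROTO_VERSION:

```cpp
#include <cassert>
#include <vector>
// #include "streams.h" // CVectorWriter, SER_NETWORK, INIT_PROTO_VERSION

void cvectorwriter_demo() {
    unsigned char a = 1, b = 2;

    // Appending: nPos == vec.size() grows the vector.
    std::vector<unsigned char> vch;
    CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch, vch.size(), a, b);
    assert((vch == std::vector<unsigned char>{1, 2}));

    // Overwriting: nPos inside the vector replaces existing bytes in place.
    std::vector<unsigned char> vch2{8, 8, 8, 8};
    CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch2, 1, a, b);
    assert((vch2 == std::vector<unsigned char>{8, 1, 2, 8}));

    // Straddling the end: part overwrite, part append.
    std::vector<unsigned char> vch3{8, 8, 8};
    CVectorWriter(SER_NETWORK, INIT_PROTO_VERSION, vch3, 2, a, b);
    assert((vch3 == std::vector<unsigned char>{8, 8, 1, 2}));
}
```

The temporaries are intentional: serialization happens in the constructor via SerializeMany, which is exactly how the new CNetMsgMaker::Make uses the class.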
mmm a / src / elements - kind . h <nl> ppp b / src / elements - kind . h <nl> inline bool IsHoleyFrozenOrSealedElementsKind ( ElementsKind kind ) { <nl> } <nl> <nl> inline bool IsHoleyElementsKind ( ElementsKind kind ) { <nl> - return kind = = HOLEY_SMI_ELEMENTS | | kind = = HOLEY_DOUBLE_ELEMENTS | | <nl> - kind = = HOLEY_ELEMENTS ; <nl> + return kind % 2 = = 1 & & kind < = HOLEY_DOUBLE_ELEMENTS ; <nl> } <nl> <nl> inline bool IsHoleyElementsKindForRead ( ElementsKind kind ) { <nl> - return IsHoleyElementsKind ( kind ) | | IsHoleyFrozenOrSealedElementsKind ( kind ) ; <nl> + return kind % 2 = = 1 & & kind < = HOLEY_FROZEN_ELEMENTS ; <nl> } <nl> <nl> inline bool IsHoleyOrDictionaryElementsKind ( ElementsKind kind ) { <nl> return IsHoleyElementsKindForRead ( kind ) | | kind = = DICTIONARY_ELEMENTS ; <nl> } <nl> <nl> - <nl> inline bool IsFastPackedElementsKind ( ElementsKind kind ) { <nl> - return kind = = PACKED_SMI_ELEMENTS | | kind = = PACKED_DOUBLE_ELEMENTS | | <nl> - kind = = PACKED_ELEMENTS ; <nl> + return kind % 2 = = 0 & & kind < = PACKED_DOUBLE_ELEMENTS ; <nl> } <nl> <nl> <nl> mmm a / test / cctest / test - elements - kind . cc <nl> ppp b / test / cctest / test - elements - kind . cc <nl> bool EQUALS ( Isolate * isolate , T left , Handle < M > right ) { <nl> return EQUALS ( isolate , handle ( left , isolate ) , right ) ; <nl> } <nl> <nl> + bool ElementsKindIsHoleyElementsKindForRead ( ElementsKind kind ) { <nl> + switch ( kind ) { <nl> + case ElementsKind : : HOLEY_SMI_ELEMENTS : <nl> + case ElementsKind : : HOLEY_ELEMENTS : <nl> + case ElementsKind : : HOLEY_DOUBLE_ELEMENTS : <nl> + case ElementsKind : : HOLEY_SEALED_ELEMENTS : <nl> + case ElementsKind : : HOLEY_FROZEN_ELEMENTS : <nl> + return true ; <nl> + default : <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + bool ElementsKindIsHoleyElementsKind ( ElementsKind kind ) { <nl> + switch ( kind ) { <nl> + case ElementsKind : : HOLEY_SMI_ELEMENTS : <nl> + case ElementsKind : : HOLEY_ELEMENTS : <nl> + case ElementsKind : : HOLEY_DOUBLE_ELEMENTS : <nl> + return true ; <nl> + default : <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> + bool ElementsKindIsFastPackedElementsKind ( ElementsKind kind ) { <nl> + switch ( kind ) { <nl> + case ElementsKind : : PACKED_SMI_ELEMENTS : <nl> + case ElementsKind : : PACKED_ELEMENTS : <nl> + case ElementsKind : : PACKED_DOUBLE_ELEMENTS : <nl> + return true ; <nl> + default : <nl> + return false ; <nl> + } <nl> + } <nl> + <nl> } / / namespace <nl> <nl> <nl> TEST ( JSArrayAddingElementsGeneralizingiFastDoubleElements ) { <nl> CHECK_EQ ( array - > map ( ) , * previous_map ) ; <nl> } <nl> <nl> + TEST ( IsHoleyElementsKindForRead ) { <nl> + for ( int i = 0 ; i < = ElementsKind : : LAST_ELEMENTS_KIND ; i + + ) { <nl> + ElementsKind kind = static_cast < ElementsKind > ( i ) ; <nl> + CHECK_EQ ( ElementsKindIsHoleyElementsKindForRead ( kind ) , <nl> + IsHoleyElementsKindForRead ( kind ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( IsHoleyElementsKind ) { <nl> + for ( int i = 0 ; i < = ElementsKind : : LAST_ELEMENTS_KIND ; i + + ) { <nl> + ElementsKind kind = static_cast < ElementsKind > ( i ) ; <nl> + CHECK_EQ ( ElementsKindIsHoleyElementsKind ( kind ) , IsHoleyElementsKind ( kind ) ) ; <nl> + } <nl> + } <nl> + <nl> + TEST ( IsFastPackedElementsKind ) { <nl> + for ( int i = 0 ; i < = ElementsKind : : LAST_ELEMENTS_KIND ; i + + ) { <nl> + ElementsKind kind = static_cast < ElementsKind > ( i ) ; <nl> + CHECK_EQ ( ElementsKindIsFastPackedElementsKind ( kind ) , <nl> + IsFastPackedElementsKind ( kind ) ) ; 
<nl> + } <nl> + } <nl> + <nl> } / / namespace test_elements_kind <nl> } / / namespace internal <nl> } / / namespace v8 <nl>
Refactor holey and packed elements - kind check
v8/v8
bf47bfd1fceb387ce31dce699f33fcac70ad889a
2019-05-08T12:22:35Z
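The v8 rewrite leans on an enum-layout invariant: packed and holey variants alternate, so every holey kind is odd and the two predicates reduce to a parity test plus an upper bound. A self-checking sketch of that invariant; the values are chosen to satisfy the commit's checks and are illustrative, since the real ElementsKind enum has more members:

```cpp
#include <cassert>

// Alternating packed/holey layout assumed by the parity checks.
enum ElementsKind {
    PACKED_SMI_ELEMENTS    = 0,
    HOLEY_SMI_ELEMENTS     = 1,
    PACKED_ELEMENTS        = 2,
    HOLEY_ELEMENTS         = 3,
    PACKED_DOUBLE_ELEMENTS = 4,
    HOLEY_DOUBLE_ELEMENTS  = 5,
    PACKED_SEALED_ELEMENTS = 6,
    HOLEY_SEALED_ELEMENTS  = 7,
    PACKED_FROZEN_ELEMENTS = 8,
    HOLEY_FROZEN_ELEMENTS  = 9,
};

inline bool IsHoleyElementsKind(ElementsKind kind) {
    return kind % 2 == 1 && kind <= HOLEY_DOUBLE_ELEMENTS;
}

inline bool IsHoleyElementsKindForRead(ElementsKind kind) {
    return kind % 2 == 1 && kind <= HOLEY_FROZEN_ELEMENTS;
}

int main() {
    // The commit's cctest makes the same comparison: the arithmetic form
    // against an explicit switch over every kind.
    assert(IsHoleyElementsKind(HOLEY_DOUBLE_ELEMENTS));
    assert(!IsHoleyElementsKind(HOLEY_SEALED_ELEMENTS)); // holey for reads only
    assert(IsHoleyElementsKindForRead(HOLEY_FROZEN_ELEMENTS));
    assert(!IsHoleyElementsKindForRead(PACKED_FROZEN_ELEMENTS));
    return 0;
}
```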
mmm a / tensorflow / lite / toco / python / BUILD <nl> ppp b / tensorflow / lite / toco / python / BUILD <nl> tf_py_test ( <nl> " / / tensorflow / lite / toco : model_flags_proto_py " , <nl> " / / tensorflow / lite / toco : toco_flags_proto_py " , <nl> ] , <nl> - data = [ <nl> - " : toco_from_protos . par " , <nl> - ] , <nl> tags = [ <nl> " no_oss " , <nl> " no_pip " , <nl> mmm a / tensorflow / python / autograph / pyct / BUILD <nl> ppp b / tensorflow / python / autograph / pyct / BUILD <nl> py_test ( <nl> sh_test ( <nl> name = " inspect_utils_test_par " , <nl> srcs = [ " inspect_utils_test . sh " ] , <nl> - data = [ <nl> - " : inspect_utils_test . par " , <nl> - ] , <nl> tags = [ " no_oss " ] , <nl> ) <nl> <nl>
Internal change
tensorflow/tensorflow
a84e7babc9d6c3752e30a070732b66e1a88369f9
2019-05-21T20:31:53Z
mmm a / ports / osg - qt / CONTROL <nl> ppp b / ports / osg - qt / CONTROL <nl> <nl> Source : osg - qt <nl> - Version : Qt4 <nl> - Description : osgQt - Qt project for making use of OpenSceneGraph ( OSG ) <nl> + Version : Qt4 - 1 <nl> + Description : osgQt - Qt project for making use of OpenSceneGraph ( OSG ) <nl> Build - Depends : osg , protobuf , qt5 - base <nl> \ No newline at end of file <nl> mmm a / ports / osg - qt / portfile . cmake <nl> ppp b / ports / osg - qt / portfile . cmake <nl> <nl> - include ( vcpkg_common_functions ) <nl> <nl> vcpkg_from_github ( <nl> OUT_SOURCE_PATH SOURCE_PATH <nl> vcpkg_from_github ( <nl> REF Qt4 <nl> SHA512 426a4ba88f680978d24817248b99c68cafa4517144e6e3d2480612870c4a224bb955539cacb438274d4ee1c93c36d94f8437d142070b2ecde2b81517bf357e71 <nl> HEAD_REF master <nl> - PATCHES <nl> + PATCHES <nl> OsgMacroUtils . patch <nl> ) <nl> <nl> + if ( VCPKG_TARGET_IS_OSX ) <nl> + string ( APPEND VCPKG_CXX_FLAGS " - stdlib = libc + + " ) <nl> + string ( APPEND VCPKG_C_FLAGS " " ) # both must be set <nl> + endif ( ) <nl> <nl> vcpkg_configure_cmake ( <nl> SOURCE_PATH $ { SOURCE_PATH } <nl> vcpkg_install_cmake ( ) <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / debug / include ) <nl> <nl> # Handle License <nl> - file ( COPY $ { SOURCE_PATH } / LICENSE . txt DESTINATION $ { CURRENT_PACKAGES_DIR } / share / osg - qt ) <nl> - file ( RENAME $ { CURRENT_PACKAGES_DIR } / share / osg - qt / LICENSE . txt $ { CURRENT_PACKAGES_DIR } / share / osg - qt / copyright ) <nl> \ No newline at end of file <nl> + file ( COPY $ { SOURCE_PATH } / LICENSE . txt DESTINATION $ { CURRENT_PACKAGES_DIR } / share / $ { PORT } ) <nl> + file ( RENAME $ { CURRENT_PACKAGES_DIR } / share / $ { PORT } / LICENSE . txt $ { CURRENT_PACKAGES_DIR } / share / $ { PORT } / copyright ) <nl> \ No newline at end of file <nl> mmm a / ports / pango / CONTROL <nl> ppp b / ports / pango / CONTROL <nl> Source : pango <nl> Version : 1 . 40 . 11 - 6 <nl> Homepage : https : / / ftp . gnome . org / pub / GNOME / sources / pango / <nl> Description : Text and font handling library . <nl> - Build - Depends : glib , gettext , cairo , fontconfig , freetype , harfbuzz [ glib ] ( ! ( windows & static ) ) <nl> + Build - Depends : glib , gettext , cairo , fontconfig , freetype , harfbuzz [ glib ] ( ! ( windows & static ) & ! osx ) <nl> mmm a / ports / qt5 - base / cmake / qt_build_submodule . cmake <nl> ppp b / ports / qt5 - base / cmake / qt_build_submodule . cmake <nl> function ( qt_build_submodule SOURCE_PATH ) <nl> endforeach ( ) <nl> if ( EXISTS " $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin " ) <nl> file ( COPY " $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin " DESTINATION " $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } " ) <nl> + <nl> + set ( CURRENT_INSTALLED_DIR_BACKUP " $ { CURRENT_INSTALLED_DIR } " ) <nl> + set ( CURRENT_INSTALLED_DIR " . / . . / . . / . . " ) # Making the qt . conf relative and not absolute <nl> + configure_file ( $ { CURRENT_INSTALLED_DIR_BACKUP } / tools / qt5 / qt_release . conf $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } / bin / qt . conf ) # This makes the tools at least useable for release <nl> + set ( CURRENT_INSTALLED_DIR " $ { CURRENT_INSTALLED_DIR_BACKUP } " ) <nl> + <nl> vcpkg_copy_tool_dependencies ( " $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } / bin " ) <nl> + if ( VCPKG_TARGET_IS_WINDOWS AND VCPKG_LIBRARY_LINKAGE STREQUAL " dynamic " ) <nl> + file ( GLOB_RECURSE DLL_DEPS_AVAIL " $ { CURRENT_INSTALLED_DIR } / tools / qt5 / bin / * . 
dll " ) <nl> + string ( REPLACE " $ { CURRENT_INSTALLED_DIR } / tools / qt5 / bin / " " " DLL_DEPS_AVAIL " $ { DLL_DEPS_AVAIL } " ) <nl> + file ( GLOB_RECURSE DLL_DEPS_NEEDED " $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } / bin / * . dll " ) <nl> + string ( REPLACE " $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } / bin / " " " DLL_DEPS_NEEDED " $ { DLL_DEPS_NEEDED } " ) <nl> + if ( DLL_DEPS_AVAIL AND DLL_DEPS_NEEDED ) <nl> + list ( REMOVE_ITEM DLL_DEPS_NEEDED $ { DLL_DEPS_AVAIL } ) <nl> + endif ( ) <nl> + foreach ( dll_dep $ { DLL_DEPS_NEEDED } ) <nl> + string ( REGEX REPLACE " [ ^ / ] + $ " " " dll_subpath " $ { dll_dep } " ) <nl> + file ( COPY " $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } / bin / $ { dll_dep } " DESTINATION " $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin / $ { dll_subpath } " ) <nl> + endforeach ( ) <nl> + endif ( ) <nl> endif ( ) <nl> endfunction ( ) <nl> \ No newline at end of file <nl> mmm a / ports / qt5 - base / cmake / qt_fix_cmake . cmake <nl> ppp b / ports / qt5 - base / cmake / qt_fix_cmake . cmake <nl> <nl> function ( qt_fix_cmake PACKAGE_DIR_TO_FIX PORT_TO_FIX ) <nl> - set ( BACKUP_PATH " $ ENV { PATH } " ) <nl> - # Find Python and add it to the path <nl> - vcpkg_find_acquire_program ( PYTHON2 ) <nl> - get_filename_component ( PYTHON2_EXE_PATH $ { PYTHON2 } DIRECTORY ) <nl> - vcpkg_add_to_path ( " $ { PYTHON2_EXE_PATH } " ) <nl> <nl> - # Fix the cmake files if they exist <nl> - if ( EXISTS $ { PACKAGE_DIR_TO_FIX } / lib / cmake ) <nl> - vcpkg_execute_required_process ( <nl> - COMMAND $ { PYTHON2 } $ { CURRENT_INSTALLED_DIR } / share / qt5 / fixcmake . py $ { PORT_TO_FIX } <nl> - WORKING_DIRECTORY $ { PACKAGE_DIR_TO_FIX } / lib / cmake <nl> - LOGNAME fix - cmake <nl> - ) <nl> - endif ( ) <nl> - if ( EXISTS $ { PACKAGE_DIR_TO_FIX } / share / cmake ) <nl> - vcpkg_execute_required_process ( <nl> - COMMAND $ { PYTHON2 } $ { CURRENT_INSTALLED_DIR } / share / qt5 / fixcmake . py $ { PORT_TO_FIX } <nl> - WORKING_DIRECTORY $ { PACKAGE_DIR_TO_FIX } / share / cmake <nl> - LOGNAME fix - cmake <nl> - ) <nl> - endif ( ) <nl> + file ( GLOB_RECURSE cmakefiles $ { PACKAGE_DIR_TO_FIX } / share / cmake / * . cmake $ { PACKAGE_DIR_TO_FIX } / lib / cmake / * . cmake ) <nl> + foreach ( cmakefile $ { cmakefiles } ) <nl> + file ( READ " $ { cmakefile } " _contents ) <nl> + if ( _contents MATCHES " _install_prefix } / tools / qt5 / bin / ( [ a - z0 - 9 ] + ) " ) # there are only about 3 to 5 cmake files which require the fix in ports : qt5 - tools qt5 - xmlpattern at5 - activeqt qt5 - quick <nl> + string ( REGEX REPLACE " _install_prefix } / tools / qt5 / bin / ( [ a - z0 - 9 ] + ) " " _install_prefix } / tools / $ { PORT_TO_FIX } / bin / \ \ 1 " _contents " $ { _contents } " ) <nl> + file ( WRITE " $ { cmakefile } " " $ { _contents } " ) <nl> + endif ( ) <nl> + endforeach ( ) <nl> + <nl> # Install cmake files <nl> if ( EXISTS $ { PACKAGE_DIR_TO_FIX } / lib / cmake ) <nl> file ( MAKE_DIRECTORY $ { PACKAGE_DIR_TO_FIX } / share ) <nl> function ( qt_fix_cmake PACKAGE_DIR_TO_FIX PORT_TO_FIX ) <nl> if ( EXISTS $ { PACKAGE_DIR_TO_FIX } / debug / lib / cmake ) <nl> file ( REMOVE_RECURSE $ { PACKAGE_DIR_TO_FIX } / debug / lib / cmake ) <nl> endif ( ) <nl> - set ( ENV { PATH } " $ { BACKUP_PATH } " ) <nl> endfunction ( ) <nl> \ No newline at end of file <nl> deleted file mode 100644 <nl> index 6f677327c1c . . 00000000000 <nl> mmm a / ports / qt5 - base / fixcmake . 
py <nl> ppp / dev / null <nl> <nl> - import os <nl> - import re <nl> - import sys <nl> - from glob import glob <nl> - <nl> - port = " qt5 " <nl> - if len ( sys . argv ) > 1 : <nl> - port = sys . argv [ 1 ] <nl> - <nl> - files = [ y for x in os . walk ( ' . ' ) for y in glob ( os . path . join ( x [ 0 ] , ' * . cmake ' ) ) ] <nl> - tooldir = " / tools / " + port + " / bin / " <nl> - <nl> - for f in files : <nl> - openedfile = open ( f , " r " ) <nl> - builder = " " <nl> - dllpatterndebug = re . compile ( " _install_prefix } / bin / Qt5 . * d + ( . dll | . so ) " ) <nl> - libpatterndebug = re . compile ( " _install_prefix } / lib / Qt5 . * d + ( . lib | . a ) " ) <nl> - exepattern = re . compile ( " _install_prefix } / bin / [ a - z ] + ( . exe | ) " ) <nl> - toolexepattern = re . compile ( " _install_prefix } / tools / qt5 / bin / [ a - z ] + ( . exe | ) " ) <nl> - tooldllpattern = re . compile ( " _install_prefix } / tools / qt5 / bin / Qt5 . * d + ( . dll | . so ) " ) <nl> - populatepluginpattern = re . compile ( " _populate_ [ ^ _ ] + _plugin_properties \ ( [ ^ ] + RELEASE " ) <nl> - populatetargetpattern = re . compile ( " _populate_ [ ^ _ ] + _target_properties \ ( RELEASE " ) <nl> - for line in openedfile : <nl> - if " _install_prefix } / tools / qt5 / $ { LIB_LOCATION } " in line : <nl> - builder + = " if ( $ { Configuration } STREQUAL \ " RELEASE \ " ) " <nl> - builder + = " \ n " + line . replace ( " / tools / qt5 / bin " , " / bin / " ) <nl> - builder + = " else ( ) " # This requires a release and debug build since Qt will check that the file exists ! <nl> - # It would be better to use an elseif here with a EXISTS check but that requires a more complicated regex to build the complete filepath since each module uses its own _ ( qtmodule ) _install_prefix <nl> - # so single configuration builds of Qt are currently not supported . Thus = > <nl> - # TODO : Make single configuration builds of Qt work correctly ! <nl> - builder + = " \ n " + line . replace ( " / tools / qt5 / debug / bin " , " / debug / bin / " ) <nl> - builder + = " endif ( ) \ n " <nl> - elif " _install_prefix } / bin / $ { LIB_LOCATION } " in line : <nl> - builder + = " if ( $ { Configuration } STREQUAL \ " RELEASE \ " ) " <nl> - builder + = " \ n " + line <nl> - builder + = " else ( ) " # This requires a release and debug build ! <nl> - builder + = " \ n " + line . replace ( " / bin / " , " / debug / bin / " ) <nl> - builder + = " endif ( ) \ n " <nl> - elif " _install_prefix } / lib / $ { LIB_LOCATION } " in line : <nl> - builder + = " if ( $ { Configuration } STREQUAL \ " RELEASE \ " ) " <nl> - builder + = " \ n " + line <nl> - builder + = " else ( ) " # This requires a release and debug build ! <nl> - builder + = " \ n " + line . replace ( " / lib / " , " / debug / lib / " ) <nl> - builder + = " endif ( ) \ n " <nl> - elif " _install_prefix } / lib / $ { IMPLIB_LOCATION } " in line : <nl> - builder + = " if ( $ { Configuration } STREQUAL \ " RELEASE \ " ) " <nl> - builder + = " \ n " + line <nl> - builder + = " else ( ) " # This requires a release and debug build ! <nl> - builder + = " \ n " + line . replace ( " / lib / " , " / debug / lib / " ) <nl> - builder + = " endif ( ) \ n " <nl> - elif " _install_prefix } / plugins / $ { PLUGIN_LOCATION } " in line : <nl> - builder + = " if ( $ { Configuration } STREQUAL \ " RELEASE \ " ) " <nl> - builder + = " \ n " + line <nl> - builder + = " else ( ) " # This requires a release and debug build ! <nl> - builder + = " \ n " + line . 
replace ( " / plugins / " , " / debug / plugins / " ) <nl> - builder + = " endif ( ) \ n " <nl> - elif " _install_prefix } / lib / qtmaind . lib " in line : <nl> - # qtmaind . lib has been moved to manual - link : <nl> - builder + = line . replace ( " / lib / " , " / debug / lib / manual - link / " ) <nl> - elif " _install_prefix } / lib / qtmain . lib " in line : <nl> - # qtmain ( d ) . lib has been moved to manual - link : <nl> - builder + = line . replace ( " / lib / " , " / lib / manual - link / " ) <nl> - builder + = " set ( imported_location_debug \ " $ { _qt5Core_install_prefix } / debug / lib / manual - link / qtmaind . lib \ " ) \ n " <nl> - builder + = " \ n " <nl> - builder + = " set_target_properties ( Qt5 : : WinMain PROPERTIES \ n " <nl> - builder + = " IMPORTED_LOCATION_DEBUG $ { imported_location_debug } \ n " <nl> - builder + = " ) \ n " <nl> - elif populatepluginpattern . search ( line ) ! = None : <nl> - builder + = line <nl> - builder + = line . replace ( " RELEASE " , " DEBUG " ) . replace ( " . dll " , " d . dll " ) . replace ( " . lib " , " d . lib " ) <nl> - elif populatetargetpattern . search ( line ) ! = None : <nl> - builder + = line <nl> - builder + = line . replace ( " RELEASE " , " DEBUG " ) . replace ( " . dll " , " d . dll " ) . replace ( " . lib " , " d . lib " ) <nl> - elif dllpatterndebug . search ( line ) ! = None : <nl> - builder + = line . replace ( " / bin / " , " / debug / bin / " ) <nl> - elif libpatterndebug . search ( line ) ! = None : <nl> - builder + = line . replace ( " / lib / " , " / debug / lib / " ) <nl> - elif tooldllpattern . search ( line ) ! = None : <nl> - builder + = line . replace ( " / tools / qt5 / bin " , " / debug / bin / " ) <nl> - elif exepattern . search ( line ) ! = None : <nl> - builder + = line . replace ( " / bin / " , tooldir ) <nl> - elif toolexepattern . search ( line ) ! = None : <nl> - builder + = line . replace ( " / tools / qt5 / bin / " , tooldir ) <nl> - else : <nl> - builder + = line <nl> - new_file = open ( f , " w " ) <nl> - new_file . write ( builder ) <nl> - new_file . close ( ) <nl> new file mode 100644 <nl> index 00000000000 . . d113fb6695b <nl> mmm / dev / null <nl> ppp b / ports / qt5 - base / patches / Qt5BasicConfig . patch <nl> <nl> pppmmm a / mkspecs / features / data / cmake / Qt5BasicConfig . cmake . in <nl> ppp + b / mkspecs / features / data / cmake / Qt5BasicConfig . cmake . in <nl> + macro ( _populate_ $ $ { CMAKE_MODULE_NAME } _target_properties Configuration LIB_LOCATI <nl> + set_property ( TARGET Qt5 : : $ $ { CMAKE_MODULE_NAME } APPEND PROPERTY IMPORTED_CONFIGURATIONS $ { Configuration } ) <nl> + <nl> + ! ! IF isEmpty ( CMAKE_DLL_DIR_IS_ABSOLUTE ) <nl> + - set ( imported_location \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_DLL_DIR } $ { LIB_LOCATION } \ " ) <nl> + + if ( \ " $ { Configuration } \ " STREQUAL \ " DEBUG \ " ) <nl> + + set ( imported_location \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_DLL_DIR } $ { LIB_LOCATION } \ " ) <nl> + + else ( ) <nl> + + set ( imported_location \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_DLL_DIR } $ { LIB_LOCATION } \ " ) <nl> + + endif ( ) <nl> + ! ! ELSE <nl> + set ( imported_location \ " $ $ { CMAKE_DLL_DIR } $ { LIB_LOCATION } \ " ) <nl> + ! ! ENDIF <nl> + macro ( _populate_ $ $ { CMAKE_MODULE_NAME } _target_properties Configuration LIB_LOCATI <nl> + ) <nl> + <nl> + ! ! IF ! isEmpty ( CMAKE_WINDOWS_BUILD ) <nl> + ! ! 
IF isEmpty ( CMAKE_LIB_DIR_IS_ABSOLUTE ) <nl> + - set ( imported_implib \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_LIB_DIR } $ { IMPLIB_LOCATION } \ " ) <nl> + + if ( \ " $ { Configuration } \ " STREQUAL \ " DEBUG \ " ) <nl> + + set ( imported_implib \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_LIB_DIR } $ { IMPLIB_LOCATION } \ " ) <nl> + + else ( ) <nl> + + set ( imported_implib \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_LIB_DIR } $ { IMPLIB_LOCATION } \ " ) <nl> + + endif ( ) <nl> + ! ! ELSE <nl> + set ( imported_implib \ " IMPORTED_IMPLIB_ $ { Configuration } \ " \ " $ $ { CMAKE_LIB_DIR } $ { IMPLIB_LOCATION } \ " ) <nl> + ! ! ENDIF <nl> + if ( NOT TARGET Qt5 : : $ $ { CMAKE_MODULE_NAME } ) <nl> + ! ! ENDIF / / CMAKE_STATIC_WINDOWS_BUILD <nl> + <nl> + ! ! IF ! isEmpty ( CMAKE_FIND_OTHER_LIBRARY_BUILD ) <nl> + - ! ! IF isEmpty ( CMAKE_DEBUG_TYPE ) <nl> + ! ! IF ! isEmpty ( CMAKE_STATIC_WINDOWS_BUILD ) <nl> + ! ! IF isEmpty ( CMAKE_LIB_DIR_IS_ABSOLUTE ) <nl> + - if ( EXISTS \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_LIB_DIR } $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + + if ( EXISTS \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_LIB_DIR } $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + ! ! ELSE / / CMAKE_LIB_DIR_IS_ABSOLUTE <nl> + if ( EXISTS \ " $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + if ( NOT TARGET Qt5 : : $ $ { CMAKE_MODULE_NAME } ) <nl> + ! ! ELSE / / CMAKE_STATIC_WINDOWS_BUILD <nl> + if ( EXISTS <nl> + ! ! IF isEmpty ( CMAKE_DLL_DIR_IS_ABSOLUTE ) <nl> + - \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_DLL_DIR } $ $ { CMAKE_LIB_FILE_LOCATION_DEBUG } \ " <nl> + + \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_DLL_DIR } $ $ { CMAKE_LIB_FILE_LOCATION_DEBUG } \ " <nl> + ! ! ELSE <nl> + \ " $ $ { CMAKE_LIB_FILE_LOCATION_DEBUG } \ " <nl> + ! ! ENDIF <nl> + AND EXISTS <nl> + ! ! IF isEmpty ( CMAKE_LIB_DIR_IS_ABSOLUTE ) <nl> + - \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_LIB_DIR } $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + + \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_LIB_DIR } $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + ! ! ELSE <nl> + \ " $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + ! ! ENDIF <nl> + _populate_ $ $ { CMAKE_MODULE_NAME } _target_properties ( DEBUG \ " $ $ { CMAKE_LIB_FILE_LOCATION_DEBUG } \ " \ " $ $ { CMAKE_IMPLIB_FILE_LOCATION_DEBUG } \ " ) <nl> + ! ! ENDIF / / CMAKE_STATIC_WINDOWS_BUILD <nl> + endif ( ) <nl> + - ! ! ENDIF / / CMAKE_DEBUG_TYPE <nl> + ! ! ENDIF / / CMAKE_FIND_OTHER_LIBRARY_BUILD <nl> + <nl> + ! ! ENDIF / / CMAKE_RELEASE_TYPE <nl> + if ( NOT TARGET Qt5 : : $ $ { CMAKE_MODULE_NAME } ) <nl> + set_property ( TARGET Qt5 : : $ { Plugin } APPEND PROPERTY IMPORTED_CONFIGURATIONS $ { Configuration } ) <nl> + <nl> + ! ! IF isEmpty ( CMAKE_PLUGIN_DIR_IS_ABSOLUTE ) <nl> + + if ( \ " $ { Configuration } \ " STREQUAL \ " DEBUG \ " ) <nl> + + set ( imported_location \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_PLUGIN_DIR } $ { PLUGIN_LOCATION } \ " ) <nl> + + else ( ) <nl> + set ( imported_location \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_PLUGIN_DIR } $ { PLUGIN_LOCATION } \ " ) <nl> + + endif ( ) <nl> + + <nl> + ! ! 
ELSE <nl> + set ( imported_location \ " $ $ { CMAKE_PLUGIN_DIR } $ { PLUGIN_LOCATION } \ " ) <nl> + ! ! ENDIF <nl> new file mode 100644 <nl> index 00000000000 . . 0ae0a03c0aa <nl> mmm / dev / null <nl> ppp b / ports / qt5 - base / patches / Qt5PluginTarget . patch <nl> <nl> pppmmm a / mkspecs / features / data / cmake / Qt5PluginTarget . cmake . in <nl> ppp + b / mkspecs / features / data / cmake / Qt5PluginTarget . cmake . in <nl> + <nl> + <nl> + add_library ( Qt5 : : $ $ CMAKE_PLUGIN_NAME MODULE IMPORTED ) <nl> + <nl> + - ! ! IF ! isEmpty ( CMAKE_RELEASE_TYPE ) <nl> + - _populate_ $ $ { CMAKE_MODULE_NAME } _plugin_properties ( $ $ CMAKE_PLUGIN_NAME RELEASE \ " $ $ { CMAKE_PLUGIN_LOCATION_RELEASE } \ " ) <nl> + - ! ! ENDIF <nl> + - ! ! IF ! isEmpty ( CMAKE_DEBUG_TYPE ) <nl> + - _populate_ $ $ { CMAKE_MODULE_NAME } _plugin_properties ( $ $ CMAKE_PLUGIN_NAME DEBUG \ " $ $ { CMAKE_PLUGIN_LOCATION_DEBUG } \ " ) <nl> + - ! ! ENDIF <nl> + + if ( EXISTS \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / $ $ { CMAKE_PLUGIN_DIR } $ $ { CMAKE_PLUGIN_LOCATION_RELEASE } \ " ) <nl> + + _populate_ $ $ { CMAKE_MODULE_NAME } _plugin_properties ( $ $ CMAKE_PLUGIN_NAME RELEASE \ " $ $ { CMAKE_PLUGIN_LOCATION_RELEASE } \ " ) <nl> + + endif ( ) <nl> + + if ( EXISTS \ " $ { _qt5 $ $ { CMAKE_MODULE_NAME } _install_prefix } / debug / $ $ { CMAKE_PLUGIN_DIR } $ $ { CMAKE_PLUGIN_LOCATION_DEBUG } \ " ) <nl> + + _populate_ $ $ { CMAKE_MODULE_NAME } _plugin_properties ( $ $ CMAKE_PLUGIN_NAME DEBUG \ " $ $ { CMAKE_PLUGIN_LOCATION_DEBUG } \ " ) <nl> + + endif ( ) <nl> + <nl> + list ( APPEND Qt5 $ $ { CMAKE_MODULE_NAME } _PLUGINS Qt5 : : $ $ CMAKE_PLUGIN_NAME ) <nl> mmm a / ports / qt5 - base / portfile . cmake <nl> ppp b / ports / qt5 - base / portfile . cmake <nl> qt_download_submodule ( OUT_SOURCE_PATH SOURCE_PATH <nl> # patches / static_opengl . patch # Use this patch if you really want to statically link angle on windows ( e . g . using - opengl es2 and - static ) . <nl> # Be carefull since it requires definining _GDI32_ for all dependent projects due to redefinition errors in the <nl> # the windows supplied gl . h header and the angle gl . h otherwise . <nl> + # CMake fixes <nl> + patches / Qt5BasicConfig . patch <nl> + patches / Qt5PluginTarget . patch <nl> patches / Qt5GuiConfigExtras . 
patch # Patches the library search behavior for EGL since angle is not build with Qt <nl> ) <nl> <nl> set ( ENV { _CL_ } " / utf - 8 " ) <nl> set ( CORE_OPTIONS <nl> - confirm - license <nl> - opensource <nl> - # - no - fontconfig <nl> # - simulator_and_device <nl> # - ltcg <nl> # - combined - angle - lib <nl> elseif ( VCPKG_TARGET_IS_OSX ) <nl> " SQLITE_LIBS = $ { SQLITE_RELEASE } - ldl - lpthread " <nl> " HARFBUZZ_LIBS = $ { HARFBUZZ_RELEASE } - framework ApplicationServices " <nl> " OPENSSL_LIBS = $ { SSL_RELEASE } $ { EAY_RELEASE } - ldl - lpthread " <nl> - " FONTCONFIG_LIBS = $ { FONTCONFIG_RELEASE } $ { FREETYPE_RELEASE } $ { EXPAT_RELEASE } " <nl> + " FONTCONFIG_LIBS = $ { FONTCONFIG_RELEASE } $ { FREETYPE_RELEASE } $ { EXPAT_RELEASE } - liconv " <nl> ) <nl> list ( APPEND DEBUG_OPTIONS <nl> " PSQL_LIBS = $ { PSQL_DEBUG } $ { SSL_DEBUG } $ { EAY_DEBUG } - ldl - lpthread " <nl> " SQLITE_LIBS = $ { SQLITE_DEBUG } - ldl - lpthread " <nl> " HARFBUZZ_LIBS = $ { HARFBUZZ_DEBUG } - framework ApplicationServices " <nl> " OPENSSL_LIBS = $ { SSL_DEBUG } $ { EAY_DEBUG } - ldl - lpthread " <nl> - " FONTCONFIG_LIBS = $ { FONTCONFIG_DEBUG } $ { FREETYPE_DEBUG } $ { EXPAT_DEBUG } " <nl> + " FONTCONFIG_LIBS = $ { FONTCONFIG_DEBUG } $ { FREETYPE_DEBUG } $ { EXPAT_DEBUG } - liconv " <nl> ) <nl> endif ( ) <nl> <nl> else ( ) <nl> file ( RENAME $ { CURRENT_PACKAGES_DIR } / lib / cmake $ { CURRENT_PACKAGES_DIR } / share / cmake ) <nl> file ( REMOVE_RECURSE $ { CURRENT_PACKAGES_DIR } / debug / lib / cmake ) # TODO : check if important debug information for cmake is lost <nl> <nl> - # This needs a new VCPKG policy . <nl> + # This needs a new VCPKG policy or a static angle build ( ANGLE needs to be fixed in VCPKG ! ) <nl> if ( VCPKG_TARGET_IS_WINDOWS AND $ { VCPKG_LIBRARY_LINKAGE } MATCHES " static " ) # Move angle dll libraries <nl> message ( STATUS " Moving ANGLE dlls from / bin to / tools / qt5 - angle / bin . In static builds dlls are not allowed in / bin " ) <nl> if ( EXISTS " $ { CURRENT_PACKAGES_DIR } / bin " ) <nl> else ( ) <nl> endif ( ) <nl> endif ( ) <nl> <nl> - # TODO : Replace python script with cmake script <nl> - vcpkg_execute_required_process ( <nl> - COMMAND $ { PYTHON3 } $ { CMAKE_CURRENT_LIST_DIR } / fixcmake . py <nl> - WORKING_DIRECTORY $ { CURRENT_PACKAGES_DIR } / share / cmake <nl> - LOGNAME fix - cmake <nl> - ) <nl> + # # Fix location of qtmain ( d ) . lib . Has been moved into manual - link . Add debug version <nl> + if ( VCPKG_TARGET_IS_WINDOWS AND NOT VCPKG_BUILD_TYPE ) <nl> + set ( cmakefile " $ { CURRENT_PACKAGES_DIR } / share / cmake / Qt5Core / Qt5CoreConfigExtras . cmake " ) <nl> + file ( READ " $ { cmakefile } " _contents ) <nl> + string ( REPLACE " set_property ( TARGET Qt5 : : WinMain APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE ) " " set_property ( TARGET Qt5 : : WinMain APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE DEBUG ) " _contents " $ { _contents } " ) <nl> + string ( REPLACE <nl> + [ [ set ( imported_location " $ { _qt5Core_install_prefix } / lib / qtmain . lib " ) ] ] <nl> + [ [ set ( imported_location_release " $ { _qt5Core_install_prefix } / lib / manual - link / qtmain . lib " ) <nl> + set ( imported_location_debug " $ { _qt5Core_install_prefix } / debug / lib / manual - link / qtmaind . 
lib " ) ] ] <nl> + _contents " $ { _contents } " ) <nl> + string ( REPLACE <nl> + [ [ set_target_properties ( Qt5 : : WinMain PROPERTIES <nl> + IMPORTED_LOCATION_RELEASE $ { imported_location } <nl> + ) ] ] <nl> + [ [ set_target_properties ( Qt5 : : WinMain PROPERTIES <nl> + IMPORTED_LOCATION_RELEASE $ { imported_location_release } <nl> + IMPORTED_LOCATION_DEBUG $ { imported_location_debug } <nl> + ) ] ] <nl> + _contents " $ { _contents } " ) <nl> + file ( WRITE " $ { cmakefile } " " $ { _contents } " ) <nl> + endif ( ) <nl> + <nl> file ( COPY $ { CMAKE_CURRENT_LIST_DIR } / vcpkg - cmake - wrapper . cmake DESTINATION $ { CURRENT_PACKAGES_DIR } / share / qt5core ) <nl> if ( EXISTS $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin ) <nl> file ( COPY $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin DESTINATION $ { CURRENT_PACKAGES_DIR } / tools / $ { PORT } ) <nl> else ( ) <nl> if ( EXISTS $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin / qt . conf ) <nl> file ( REMOVE " $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin / qt . conf " ) <nl> endif ( ) <nl> - <nl> + set ( CURRENT_INSTALLED_DIR_BACKUP " $ { CURRENT_INSTALLED_DIR } " ) <nl> + set ( CURRENT_INSTALLED_DIR " . / . . / . . / . . " ) # Making the qt . conf relative and not absolute <nl> + configure_file ( $ { CURRENT_PACKAGES_DIR } / tools / qt5 / qt_release . conf $ { CURRENT_PACKAGES_DIR } / tools / qt5 / bin / qt . conf ) # This makes the tools at least useable for release <nl> + set ( CURRENT_INSTALLED_DIR " $ { CURRENT_INSTALLED_DIR_BACKUP } " ) <nl> + <nl> qt_install_copyright ( $ { SOURCE_PATH } ) <nl> endif ( ) <nl> # install scripts for other qt ports <nl> file ( COPY <nl> - $ { CMAKE_CURRENT_LIST_DIR } / fixcmake . py <nl> $ { CMAKE_CURRENT_LIST_DIR } / cmake / qt_port_hashes . cmake <nl> $ { CMAKE_CURRENT_LIST_DIR } / cmake / qt_port_functions . cmake <nl> $ { CMAKE_CURRENT_LIST_DIR } / cmake / qt_fix_makefile_install . cmake <nl> mmm a / ports / qt5 - base / vcpkg - cmake - wrapper . cmake <nl> ppp b / ports / qt5 - base / vcpkg - cmake - wrapper . cmake <nl> _find_package ( $ { ARGS } ) <nl> function ( add_qt_library _target ) <nl> foreach ( _lib IN LISTS ARGN ) <nl> # The fact that we are within this file means we are using the VCPKG toolchain . Has such we only need to search in VCPKG paths ! <nl> - find_library ( $ { _lib } _LIBRARY_DEBUG NAMES $ { _lib } d $ { _lib } NAMES_PER_DIR PATH_SUFFIXES lib plugins / platforms PATHS " $ { _VCPKG_INSTALLED_DIR } / $ { VCPKG_TARGET_TRIPLET } / debug " NO_DEFAULT_PATH ) <nl> + find_library ( $ { _lib } _LIBRARY_DEBUG NAMES $ { _lib } _debug $ { _lib } d $ { _lib } NAMES_PER_DIR PATH_SUFFIXES lib plugins / platforms PATHS " $ { _VCPKG_INSTALLED_DIR } / $ { VCPKG_TARGET_TRIPLET } / debug " NO_DEFAULT_PATH ) <nl> find_library ( $ { _lib } _LIBRARY_RELEASE NAMES $ { _lib } NAMES_PER_DIR PATH_SUFFIXES lib plugins / platforms PATHS " $ { _VCPKG_INSTALLED_DIR } / $ { VCPKG_TARGET_TRIPLET } " NO_DEFAULT_PATH ) <nl> if ( $ { _lib } _LIBRARY_RELEASE ) <nl> list ( APPEND interface_lib \ $ < \ $ < NOT : \ $ < CONFIG : DEBUG > > : $ { $ { _lib } _LIBRARY_RELEASE } > ) <nl> mmm a / ports / qt5 - imageformats / CONTROL <nl> ppp b / ports / qt5 - imageformats / CONTROL <nl> <nl> Source : qt5 - imageformats <nl> - Version : 5 . 12 . 5 - 2 <nl> + Version : 5 . 12 . 5 - 3 <nl> Description : Qt5 Image Formats Module - Plugins for additional image formats : TIFF , MNG , TGA , WBMP <nl> Build - Depends : qt5 - base , tiff , libwebp , jasper <nl> mmm a / ports / qt5 - imageformats / portfile . 
cmake <nl> ppp b / ports / qt5 - imageformats / portfile . cmake <nl> find_library ( TIFF_DEBUG NAMES tiffd PATHS " $ { CURRENT_INSTALLED_DIR } / debug / lib " N <nl> <nl> find_library ( JASPER_RELEASE NAMES jasper PATHS " $ { CURRENT_INSTALLED_DIR } / lib " NO_DEFAULT_PATH ) <nl> find_library ( JASPER_DEBUG NAMES jasperd jasper libjasperd libjasper PATHS " $ { CURRENT_INSTALLED_DIR } / debug / lib " NO_DEFAULT_PATH ) <nl> - find_library ( FREEGLUT_RELEASE NAMES freeglut glut PATHS " $ { CURRENT_INSTALLED_DIR } / lib " NO_DEFAULT_PATH ) <nl> - find_library ( FREEGLUT_DEBUG NAMES freeglutd freeglut glutd glut PATHS " $ { CURRENT_INSTALLED_DIR } / debug / lib " NO_DEFAULT_PATH ) <nl> + if ( NOT VCPKG_TARGET_IS_OSX ) <nl> + find_library ( FREEGLUT_RELEASE NAMES freeglut glut PATHS " $ { CURRENT_INSTALLED_DIR } / lib " NO_DEFAULT_PATH ) <nl> + find_library ( FREEGLUT_DEBUG NAMES freeglutd freeglut glutd glut PATHS " $ { CURRENT_INSTALLED_DIR } / debug / lib " NO_DEFAULT_PATH ) <nl> + endif ( ) <nl> <nl> find_library ( WEBP_RELEASE NAMES webp PATHS " $ { CURRENT_INSTALLED_DIR } / lib " NO_DEFAULT_PATH ) <nl> find_library ( WEBP_DEBUG NAMES webpd webp PATHS " $ { CURRENT_INSTALLED_DIR } / debug / lib " NO_DEFAULT_PATH ) <nl> mmm a / scripts / ci . baseline . txt <nl> ppp b / scripts / ci . baseline . txt <nl> cudnn : arm - uwp = fail <nl> cudnn : x64 - uwp = fail <nl> cudnn : x64 - windows - static = fail <nl> cudnn : x86 - windows = fail <nl> - cutelyst2 : x64 - osx = fail <nl> date : arm64 - windows = fail <nl> dbow2 : x64 - osx = fail <nl> dcmtk : arm64 - windows = fail <nl> jemalloc : x64 - uwp = fail <nl> jemalloc : x64 - windows - static = fail <nl> jinja2cpplight : arm - uwp = fail <nl> jinja2cpplight : x64 - uwp = fail <nl> - kd - soap : x64 - osx = fail <nl> keystone : arm64 - windows = fail <nl> keystone : arm - uwp = fail <nl> keystone : x64 - uwp = fail <nl> mpir : arm64 - windows = fail <nl> mpir : arm - uwp = fail <nl> mpir : x64 - uwp = fail <nl> <nl> - # Conflicts with angle and qt - 5base <nl> + # Conflicts with angle <nl> ms - angle : arm64 - windows = skip <nl> ms - angle : arm - uwp = skip <nl> ms - angle : x64 - linux = skip <nl> osgearth : x64 - osx = fail <nl> osgearth : x64 - linux = fail <nl> osgearth : x64 - windows - static = fail <nl> osg - qt : x64 - windows - static = fail <nl> + # Missing Fontconfig linkage in vcpkg_cmake_wrapper . Will be fixed by # 9860 with the removal of the wrapper . <nl> + osg - qt : x64 - osx = fail <nl> osg - qt : x64 - linux = ignore <nl> otl : x64 - windows = ignore <nl> otl : x64 - windows - static = ignore <nl> python3 : x64 - uwp = fail <nl> qca : x64 - linux = fail <nl> qca : x64 - osx = fail <nl> qca : x64 - windows - static = fail <nl> - qcustomplot : x64 - osx = fail <nl> qhull : arm - uwp = ignore <nl> qhull : x64 - windows - static = ignore <nl> qhull : x64 - uwp = ignore <nl> qpid - proton : arm - uwp = fail <nl> qpid - proton : x64 - uwp = fail <nl> qpid - proton : x64 - windows - static = fail <nl> qscintilla : x64 - linux = fail <nl> - qscintilla : x64 - osx = fail <nl> qt5 - activeqt : x64 - linux = fail <nl> qt5 - activeqt : x64 - osx = fail <nl> qt5 - macextras : x64 - linux = fail <nl>
[ qt5 - base , qt5 - imageformats ] fix issues on osx ( )
microsoft/vcpkg
941d5464544eb5812b2cbd6e687c3ebc78ed2624
2020-04-10T00:08:01Z
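The qt5-base commit above replaces the Python fixcmake.py helper with an in-place string(REGEX REPLACE) over the installed .cmake files, rewriting _install_prefix}/tools/qt5/bin/<tool> to the per-port tool directory. A rough C++17 sketch of the same pass follows; the port name and package directory are hypothetical, and this illustrates the technique rather than reproducing vcpkg code.

#include <filesystem>
#include <fstream>
#include <iostream>
#include <regex>
#include <sstream>
#include <string>

namespace fs = std::filesystem;

int main() {
    const std::string port = "qt5-tools";                    // hypothetical port
    const fs::path root = "packages/qt5-tools/share/cmake";  // hypothetical dir
    // Same pattern the port file matches; only a handful of files contain it.
    const std::regex pattern(R"(_install_prefix\}/tools/qt5/bin/([a-z0-9]+))");

    for (const auto& entry : fs::recursive_directory_iterator(root)) {
        if (entry.path().extension() != ".cmake") continue;
        std::ifstream in(entry.path());
        std::stringstream buf;
        buf << in.rdbuf();
        std::string contents = buf.str();
        if (!std::regex_search(contents, pattern)) continue;
        contents = std::regex_replace(
            contents, pattern, "_install_prefix}/tools/" + port + "/bin/$1");
        std::ofstream(entry.path()) << contents;             // write back in place
        std::cout << "patched " << entry.path() << "\n";
    }
}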
mmm a / lib / IRGen / GenEnum . cpp <nl> ppp b / lib / IRGen / GenEnum . cpp <nl> namespace { <nl> std : : tie ( payloadTag , extraTag ) = getNoPayloadCaseValue ( Case ) ; <nl> <nl> auto & ti = getFixedPayloadTypeInfo ( ) ; <nl> - bool hasExtraInhabitants = ti . getFixedExtraInhabitantCount ( IGF . IGM ) > 0 ; <nl> - <nl> + <nl> llvm : : Value * payloadResult = nullptr ; <nl> - if ( hasExtraInhabitants ) <nl> - payloadResult = payload . emitCompare ( IGF , <nl> + / / We can omit the payload check if this is the only case represented with <nl> + / / the particular extra tag bit pattern set . <nl> + / / <nl> + / / TODO : This logic covers the most common case , when there ' s exactly one <nl> + / / more no - payload case than extra inhabitants in the payload . This could <nl> + / / be slightly generalized to cases where there ' s multiple tag bits and <nl> + / / exactly one no - payload case in the highest used tag value . <nl> + if ( ! tagBits | | <nl> + ElementsWithNoPayload . size ( ) ! = getFixedExtraInhabitantCount ( IGF . IGM ) + 1 ) <nl> + payloadResult = payload . emitCompare ( IGF , <nl> ti . getFixedExtraInhabitantMask ( IGF . IGM ) , <nl> - payloadTag ) ; <nl> + payloadTag ) ; <nl> <nl> / / If any tag bits are present , they must match . <nl> llvm : : Value * tagResult = nullptr ; <nl> new file mode 100644 <nl> index 000000000000 . . 1872509a0b51 <nl> mmm / dev / null <nl> ppp b / test / IRGen / select_enum_single_payload . sil <nl> <nl> + / / RUN : % target - swift - frontend % s - emit - ir | FileCheck % s <nl> + sil_stage canonical <nl> + <nl> + import Builtin <nl> + <nl> + enum ManyEmptyCases { <nl> + case A <nl> + case B <nl> + case C ( Builtin . Int64 ) <nl> + } <nl> + <nl> + / / CHECK - LABEL : define i1 @ select_enum_A ( i64 , i1 ) <nl> + / / CHECK : [ [ PAYLOAD : % . * ] ] = icmp eq i64 % 0 , 0 <nl> + / / CHECK : [ [ EXTRA : % . * ] ] = and i1 % 1 , [ [ PAYLOAD ] ] <nl> + / / CHECK : ret i1 [ [ EXTRA ] ] <nl> + sil @ select_enum_A : $ @ convention ( thin ) ( ManyEmptyCases ) - > Builtin . Int1 { <nl> + entry ( % 0 : $ ManyEmptyCases ) : <nl> + % 4 = integer_literal $ Builtin . Int1 , - 1 / / user : % 6 <nl> + % 5 = integer_literal $ Builtin . Int1 , 0 / / user : % 6 <nl> + % 6 = select_enum % 0 : $ ManyEmptyCases , case # ManyEmptyCases . A ! enumelt : % 4 , default % 5 : $ Builtin . Int1 <nl> + return % 6 : $ Builtin . Int1 <nl> + } <nl> + <nl> + / / CHECK - LABEL : define i1 @ select_enum_B ( i64 , i1 ) <nl> + / / CHECK : [ [ PAYLOAD : % . * ] ] = icmp eq i64 % 0 , 1 <nl> + / / CHECK : [ [ EXTRA : % . * ] ] = and i1 % 1 , [ [ PAYLOAD ] ] <nl> + / / CHECK : ret i1 [ [ EXTRA ] ] <nl> + sil @ select_enum_B : $ @ convention ( thin ) ( ManyEmptyCases ) - > Builtin . Int1 { <nl> + entry ( % 0 : $ ManyEmptyCases ) : <nl> + % 4 = integer_literal $ Builtin . Int1 , - 1 / / user : % 6 <nl> + % 5 = integer_literal $ Builtin . Int1 , 0 / / user : % 6 <nl> + % 6 = select_enum % 0 : $ ManyEmptyCases , case # ManyEmptyCases . B ! enumelt : % 4 , default % 5 : $ Builtin . Int1 <nl> + return % 6 : $ Builtin . Int1 <nl> + } <nl>
Reapply " IRGen : Fix select_enum for single - payload , multi - empty - case enums . "
apple/swift
299a5681eac59630e2c1d3c4a4d6d9212cd85e7e
2015-12-17T01:08:51Z
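The swift commit above hinges on a counting argument: the payload comparison in select_enum codegen can only be omitted when extra tag bits exist and there is exactly one more no-payload case than the payload type has extra inhabitants. A small C++ sketch of that predicate, with invented stand-in names for the IRGen types:

#include <iostream>

struct EnumLayout {
    bool hasExtraTagBits;        // analogous to tagBits in GenEnum.cpp
    unsigned numNoPayloadCases;  // ElementsWithNoPayload.size()
    unsigned extraInhabitants;   // getFixedExtraInhabitantCount(...)
};

// Mirrors the patched condition: emit the payload compare unless the tag
// bit pattern alone identifies the case.
bool needsPayloadCompare(const EnumLayout& e) {
    return !e.hasExtraTagBits ||
           e.numNoPayloadCases != e.extraInhabitants + 1;
}

int main() {
    // ManyEmptyCases { A, B, C(Int64) }: Int64 has no extra inhabitants and
    // there are two empty cases, so the tag bit is shared and the payload
    // value (0 for A, 1 for B) must still be compared, as the test expects.
    EnumLayout many{true, 2, 0};
    std::cout << needsPayloadCompare(many) << "\n";  // prints 1
}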
mmm a / Marlin / src / lcd / extensible_ui / ui_api . cpp <nl> ppp b / Marlin / src / lcd / extensible_ui / ui_api . cpp <nl> namespace ExtUI { <nl> & & ( linked_nozzles | | active_extruder = = 0 ) <nl> # endif <nl> ) zprobe_zoffset + = mm ; <nl> + # else <nl> + UNUSED ( mm ) ; <nl> # endif <nl> <nl> # if EXTRUDERS > 1 <nl> namespace ExtUI { <nl> } <nl> # else <nl> UNUSED ( linked_nozzles ) ; <nl> + UNUSED ( mm ) ; <nl> # endif <nl> } <nl> <nl>
Fix unused variable warnings ( )
MarlinFirmware/Marlin
57ed063ba11fd58222aa349d8af70b07f3c4935b
2019-07-17T08:16:10Z
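The Marlin fix above is the standard "touch the parameter" idiom: when an argument is only consumed inside one branch of an #if, the other branch must still mention it or -Wunused-parameter fires. A minimal sketch, with an invented feature flag and a hedged guess at the shape of Marlin's macro:

#include <iostream>

#define UNUSED(x) ((void)(x))  // Marlin defines something close to this

#define HAS_BED_PROBE 0        // hypothetical configuration switch

void addZOffset(float mm) {
#if HAS_BED_PROBE
    std::cout << "offset by " << mm << "\n";
#else
    UNUSED(mm);                // parameter is intentionally ignored here
#endif
}

int main() { addZOffset(0.1f); }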
mmm a / doc / classes / CanvasItem . xml <nl> ppp b / doc / classes / CanvasItem . xml <nl> <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Forces the transform to update . Transform changes in physics are not instant for performance reasons . Transforms are accumulated and then set . Use this if you need an up - to - date transform when doing physics operations . <nl> < / description > <nl> < / method > <nl> < method name = " get_canvas " qualifiers = " const " > <nl> mmm a / doc / classes / Spatial . xml <nl> ppp b / doc / classes / Spatial . xml <nl> <nl> < return type = " void " > <nl> < / return > <nl> < description > <nl> + Forces the transform to update . Transform changes in physics are not instant for performance reasons . Transforms are accumulated and then set . Use this if you need an up - to - date transform when doing physics operations . <nl> < / description > <nl> < / method > <nl> < method name = " get_parent_spatial " qualifiers = " const " > <nl>
Merge pull request from Jummit / document - force_update_transform
godotengine/godot
d93201b27f696d2b73902d73e6e29967f684dedb
2019-11-01T13:03:01Z
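The new documentation above describes a deferred-transform scheme: setters only mark state dirty, the engine flushes at a fixed point, and force_update_transform flushes early for physics code that needs a fresh value. A toy C++ illustration of that pattern, not Godot's actual implementation:

#include <iostream>

struct SpatialLike {
    float pending_x = 0, x = 0;
    bool dirty = false;

    void set_position(float px) { pending_x = px; dirty = true; }  // deferred
    void force_update_transform() {      // flush now instead of end of frame
        if (dirty) { x = pending_x; dirty = false; }
    }
    void end_of_frame() { force_update_transform(); }  // normal flush point
};

int main() {
    SpatialLike n;
    n.set_position(5);
    std::cout << n.x << "\n";  // 0: write not yet applied
    n.force_update_transform();
    std::cout << n.x << "\n";  // 5: up to date for a physics query
}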
mmm a / db / db . cpp <nl> ppp b / db / db . cpp <nl> void connThread ( ) <nl> resp . setData ( opReply , " i am fine " ) ; <nl> dbMsgPort . reply ( m , resp ) ; <nl> if ( end ) { <nl> - cout < < curTimeMillis ( ) % 10000 < < " end msg " < < endl ; <nl> - dbMsgPort . shutdown ( ) ; <nl> - sleepmillis ( 500 ) ; <nl> - problem ( ) < < " exiting end msg " < < endl ; <nl> - exit ( EXIT_SUCCESS ) ; <nl> + cout < < curTimeMillis ( ) % 10000 < < " end msg " < < dbMsgPort . farEnd . toString ( ) < < endl ; <nl> + if ( dbMsgPort . farEnd . isLocalHost ( ) ) { <nl> + dbMsgPort . shutdown ( ) ; <nl> + sleepmillis ( 500 ) ; <nl> + problem ( ) < < " exiting end msg " < < endl ; <nl> + exit ( EXIT_SUCCESS ) ; <nl> + } <nl> + else { <nl> + cout < < " ( not from localhost , ignoring end msg ) " < < endl ; <nl> + } <nl> } <nl> } <nl> else if ( m . data - > operation = = dbQuery ) { <nl> void msg ( const char * m , const char * address , int port , int extras = 0 ) { <nl> if ( ! p . connect ( db ) ) <nl> return ; <nl> <nl> - for ( int q = 0 ; q < 3 ; q + + ) { <nl> + const int Loops = 1 ; <nl> + for ( int q = 0 ; q < Loops ; q + + ) { <nl> Message send ; <nl> Message response ; <nl> <nl> void msg ( const char * m , const char * address , int port , int extras = 0 ) { <nl> double tm = t . micros ( ) + 1 ; <nl> cout < < " * * * * ok . response . data : " < < ok < < " time : " < < tm / 1000 . 0 < < " ms " < < <nl> ( ( double ) len ) * 8 / 1000000 / ( tm / 1000000 ) < < " Mbps " < < endl ; <nl> - if ( q + 1 < 3 ) { <nl> - cout < < " \ t \ tSLEEP 8 then sending again " < < endl ; <nl> + if ( q + 1 < Loops ) { <nl> + cout < < " \ t \ tSLEEP 8 then sending again as a test " < < endl ; <nl> sleepsecs ( 8 ) ; <nl> } <nl> } <nl> + sleepsecs ( 1 ) ; <nl> <nl> p . shutdown ( ) ; <nl> } <nl>
db " end " msg now only works for localhost , for security .
mongodb/mongo
683671d4bbd039d88144d01061e9f37a58262cf4
2008-06-13T18:28:43Z
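The mongodb change above gates a remote shutdown command on the peer being loopback. A stripped-down sketch of the guard; the peer address is simplified to a string where the real code inspects the socket's far end:

#include <iostream>
#include <string>

bool isLocalHost(const std::string& peer) {
    return peer.rfind("127.", 0) == 0 || peer == "::1";
}

void handleEndMsg(const std::string& peer) {
    if (isLocalHost(peer)) {
        std::cout << "exiting end msg\n";  // would shut down the port and exit
    } else {
        std::cout << "(not from localhost, ignoring end msg)\n";
    }
}

int main() {
    handleEndMsg("127.0.0.1");  // honoured
    handleEndMsg("10.0.0.7");   // ignored
}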
mmm a / AUTHORS <nl> ppp b / AUTHORS <nl> Developers : <nl> <nl> hannon235 ( Chris ) <nl> Fixing a bug that the submenu of ExtensionTest in TestCpp can ' t scroll . <nl> + Implements a socket . io client extension and adds a test case . <nl> <nl> pktangyue <nl> Fixing a bug that CCScale9Sprite : : setInsetLeft / XXX can ' t work for rotated sprite frame . <nl>
Update AUTHORS [ ci skip ]
cocos2d/cocos2d-x
0b6f09f08f80c61fd68dc00bca3b0960963b45ec
2013-07-15T03:42:07Z
mmm a / src / compiler / ia32 / code - generator - ia32 . cc <nl> ppp b / src / compiler / ia32 / code - generator - ia32 . cc <nl> void CodeGenerator : : AssembleArchInstruction ( Instruction * instr ) { <nl> } <nl> break ; <nl> case kIA32Cmp : <nl> - if ( HasImmediateInput ( instr , 1 ) ) { <nl> - __ cmp ( i . InputOperand ( 0 ) , i . InputImmediate ( 1 ) ) ; <nl> + if ( AddressingModeField : : decode ( instr - > opcode ( ) ) ! = kMode_None ) { <nl> + size_t index = 0 ; <nl> + Operand operand = i . MemoryOperand ( & index ) ; <nl> + if ( HasImmediateInput ( instr , index ) ) { <nl> + __ cmp ( operand , i . InputImmediate ( index ) ) ; <nl> + } else { <nl> + __ cmp ( operand , i . InputRegister ( index ) ) ; <nl> + } <nl> } else { <nl> - __ cmp ( i . InputRegister ( 0 ) , i . InputOperand ( 1 ) ) ; <nl> + if ( HasImmediateInput ( instr , 1 ) ) { <nl> + __ cmp ( i . InputOperand ( 0 ) , i . InputImmediate ( 1 ) ) ; <nl> + } else { <nl> + __ cmp ( i . InputRegister ( 0 ) , i . InputOperand ( 1 ) ) ; <nl> + } <nl> } <nl> break ; <nl> case kIA32Test : <nl> - if ( HasImmediateInput ( instr , 1 ) ) { <nl> - __ test ( i . InputOperand ( 0 ) , i . InputImmediate ( 1 ) ) ; <nl> + if ( AddressingModeField : : decode ( instr - > opcode ( ) ) ! = kMode_None ) { <nl> + size_t index = 0 ; <nl> + Operand operand = i . MemoryOperand ( & index ) ; <nl> + if ( HasImmediateInput ( instr , index ) ) { <nl> + __ test ( operand , i . InputImmediate ( index ) ) ; <nl> + } else { <nl> + __ test ( i . InputRegister ( index ) , operand ) ; <nl> + } <nl> } else { <nl> - __ test ( i . InputRegister ( 0 ) , i . InputOperand ( 1 ) ) ; <nl> + if ( HasImmediateInput ( instr , 1 ) ) { <nl> + __ test ( i . InputOperand ( 0 ) , i . InputImmediate ( 1 ) ) ; <nl> + } else { <nl> + __ test ( i . InputRegister ( 0 ) , i . InputOperand ( 1 ) ) ; <nl> + } <nl> } <nl> break ; <nl> case kIA32Imul : <nl> mmm a / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> ppp b / src / compiler / ia32 / instruction - selector - ia32 . cc <nl> bool InstructionSelector : : IsTailCallAddressImmediate ( ) { return true ; } <nl> <nl> namespace { <nl> <nl> + void VisitCompareWithMemoryOperand ( InstructionSelector * selector , <nl> + InstructionCode opcode , Node * left , <nl> + InstructionOperand right , <nl> + FlagsContinuation * cont ) { <nl> + DCHECK ( left - > opcode ( ) = = IrOpcode : : kLoad ) ; <nl> + IA32OperandGenerator g ( selector ) ; <nl> + size_t input_count = 0 ; <nl> + InstructionOperand inputs [ 6 ] ; <nl> + AddressingMode addressing_mode = <nl> + g . GetEffectiveAddressMemoryOperand ( left , inputs , & input_count ) ; <nl> + opcode | = AddressingModeField : : encode ( addressing_mode ) ; <nl> + opcode = cont - > Encode ( opcode ) ; <nl> + inputs [ input_count + + ] = right ; <nl> + <nl> + if ( cont - > IsBranch ( ) ) { <nl> + inputs [ input_count + + ] = g . Label ( cont - > true_block ( ) ) ; <nl> + inputs [ input_count + + ] = g . Label ( cont - > false_block ( ) ) ; <nl> + selector - > Emit ( opcode , 0 , nullptr , input_count , inputs ) ; <nl> + } else { <nl> + DCHECK ( cont - > IsSet ( ) ) ; <nl> + InstructionOperand output = g . DefineAsRegister ( cont - > result ( ) ) ; <nl> + selector - > Emit ( opcode , 1 , & output , input_count , inputs ) ; <nl> + } <nl> + } <nl> + <nl> + / / Determines if { input } of { node } can be replaced by a memory operand . 
<nl> + bool CanUseMemoryOperand ( InstructionSelector * selector , InstructionCode opcode , <nl> + Node * node , Node * input ) { <nl> + if ( input - > opcode ( ) ! = IrOpcode : : kLoad | | ! selector - > CanCover ( node , input ) ) { <nl> + return false ; <nl> + } <nl> + MachineRepresentation load_representation = <nl> + LoadRepresentationOf ( input - > op ( ) ) . representation ( ) ; <nl> + if ( load_representation = = MachineRepresentation : : kWord32 | | <nl> + load_representation = = MachineRepresentation : : kTagged ) { <nl> + return opcode = = kIA32Cmp | | opcode = = kIA32Test ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> / / Shared routine for multiple compare operations . <nl> void VisitCompare ( InstructionSelector * selector , InstructionCode opcode , <nl> InstructionOperand left , InstructionOperand right , <nl> void VisitFloat64Compare ( InstructionSelector * selector , Node * node , <nl> VisitCompare ( selector , kSSEFloat64Cmp , right , left , cont , false ) ; <nl> } <nl> <nl> - <nl> / / Shared routine for multiple word compare operations . <nl> void VisitWordCompare ( InstructionSelector * selector , Node * node , <nl> InstructionCode opcode , FlagsContinuation * cont ) { <nl> IA32OperandGenerator g ( selector ) ; <nl> - Node * const left = node - > InputAt ( 0 ) ; <nl> - Node * const right = node - > InputAt ( 1 ) ; <nl> + Node * left = node - > InputAt ( 0 ) ; <nl> + Node * right = node - > InputAt ( 1 ) ; <nl> + <nl> + / / If one of the two inputs is an immediate , make sure it ' s on the right . <nl> + if ( ! g . CanBeImmediate ( right ) & & g . CanBeImmediate ( left ) ) { <nl> + if ( ! node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) cont - > Commute ( ) ; <nl> + std : : swap ( left , right ) ; <nl> + } <nl> <nl> - / / Match immediates on left or right side of comparison . <nl> + / / Match immediates on right side of comparison . <nl> if ( g . CanBeImmediate ( right ) ) { <nl> - VisitCompare ( selector , opcode , g . Use ( left ) , g . UseImmediate ( right ) , cont ) ; <nl> - } else if ( g . CanBeImmediate ( left ) ) { <nl> + if ( CanUseMemoryOperand ( selector , opcode , node , left ) ) { <nl> + return VisitCompareWithMemoryOperand ( selector , opcode , left , <nl> + g . UseImmediate ( right ) , cont ) ; <nl> + } <nl> + return VisitCompare ( selector , opcode , g . Use ( left ) , g . UseImmediate ( right ) , <nl> + cont ) ; <nl> + } <nl> + <nl> + if ( g . CanBeBetterLeftOperand ( right ) ) { <nl> if ( ! node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) cont - > Commute ( ) ; <nl> - VisitCompare ( selector , opcode , g . Use ( right ) , g . UseImmediate ( left ) , cont ) ; <nl> - } else { <nl> - VisitCompare ( selector , opcode , left , right , cont , <nl> - node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) ; <nl> + std : : swap ( left , right ) ; <nl> } <nl> - } <nl> <nl> + if ( CanUseMemoryOperand ( selector , opcode , node , left ) ) { <nl> + return VisitCompareWithMemoryOperand ( selector , opcode , left , <nl> + g . UseRegister ( right ) , cont ) ; <nl> + } <nl> + return VisitCompare ( selector , opcode , left , right , cont , <nl> + node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) ; <nl> + } <nl> <nl> void VisitWordCompare ( InstructionSelector * selector , Node * node , <nl> FlagsContinuation * cont ) { <nl> mmm a / src / compiler / instruction - selector . cc <nl> ppp b / src / compiler / instruction - selector . 
cc <nl> InstructionSelector : : InstructionSelector ( <nl> instructions_ ( zone ) , <nl> defined_ ( node_count , false , zone ) , <nl> used_ ( node_count , false , zone ) , <nl> + effect_level_ ( node_count , 0 , zone ) , <nl> virtual_registers_ ( node_count , <nl> InstructionOperand : : kInvalidVirtualRegister , zone ) , <nl> scheduler_ ( nullptr ) , <nl> Instruction * InstructionSelector : : Emit ( Instruction * instr ) { <nl> <nl> bool InstructionSelector : : CanCover ( Node * user , Node * node ) const { <nl> return node - > OwnedBy ( user ) & & <nl> - schedule ( ) - > block ( node ) = = schedule ( ) - > block ( user ) ; <nl> + schedule ( ) - > block ( node ) = = schedule ( ) - > block ( user ) & & <nl> + ( node - > op ( ) - > HasProperty ( Operator : : kPure ) | | <nl> + GetEffectLevel ( node ) = = GetEffectLevel ( user ) ) ; <nl> } <nl> <nl> - <nl> int InstructionSelector : : GetVirtualRegister ( const Node * node ) { <nl> DCHECK_NOT_NULL ( node ) ; <nl> size_t const id = node - > id ( ) ; <nl> void InstructionSelector : : MarkAsUsed ( Node * node ) { <nl> used_ [ id ] = true ; <nl> } <nl> <nl> + int InstructionSelector : : GetEffectLevel ( Node * node ) const { <nl> + DCHECK_NOT_NULL ( node ) ; <nl> + size_t const id = node - > id ( ) ; <nl> + DCHECK_LT ( id , effect_level_ . size ( ) ) ; <nl> + return effect_level_ [ id ] ; <nl> + } <nl> + <nl> + void InstructionSelector : : SetEffectLevel ( Node * node , int effect_level ) { <nl> + DCHECK_NOT_NULL ( node ) ; <nl> + size_t const id = node - > id ( ) ; <nl> + DCHECK_LT ( id , effect_level_ . size ( ) ) ; <nl> + effect_level_ [ id ] = effect_level ; <nl> + } <nl> <nl> void InstructionSelector : : MarkAsRepresentation ( MachineRepresentation rep , <nl> const InstructionOperand & op ) { <nl> void InstructionSelector : : VisitBlock ( BasicBlock * block ) { <nl> current_block_ = block ; <nl> int current_block_end = static_cast < int > ( instructions_ . size ( ) ) ; <nl> <nl> + int effect_level = 0 ; <nl> + for ( Node * const node : * block ) { <nl> + if ( node - > opcode ( ) = = IrOpcode : : kStore | | <nl> + node - > opcode ( ) = = IrOpcode : : kCheckedStore | | <nl> + node - > opcode ( ) = = IrOpcode : : kCall ) { <nl> + + + effect_level ; <nl> + } <nl> + SetEffectLevel ( node , effect_level ) ; <nl> + } <nl> + <nl> / / Generate code for the block control " top down " , but schedule the code <nl> / / " bottom up " . <nl> VisitControl ( block ) ; <nl> mmm a / src / compiler / instruction - selector . h <nl> ppp b / src / compiler / instruction - selector . h <nl> class InstructionSelector final { <nl> / / Checks if { node } is currently live . <nl> bool IsLive ( Node * node ) const { return ! IsDefined ( node ) & & IsUsed ( node ) ; } <nl> <nl> + / / Gets the effect level of { node } . <nl> + int GetEffectLevel ( Node * node ) const ; <nl> + <nl> int GetVirtualRegister ( const Node * node ) ; <nl> const std : : map < NodeId , int > GetVirtualRegistersForTesting ( ) const ; <nl> <nl> class InstructionSelector final { <nl> / / will need to generate code for it . <nl> void MarkAsUsed ( Node * node ) ; <nl> <nl> + / / Sets the effect level of { node } . <nl> + void SetEffectLevel ( Node * node , int effect_level ) ; <nl> + <nl> / / Inform the register allocation of the representation of the value produced <nl> / / by { node } . 
<nl> void MarkAsRepresentation ( MachineRepresentation rep , Node * node ) ; <nl> class InstructionSelector final { <nl> ZoneVector < Instruction * > instructions_ ; <nl> BoolVector defined_ ; <nl> BoolVector used_ ; <nl> + IntVector effect_level_ ; <nl> IntVector virtual_registers_ ; <nl> InstructionScheduler * scheduler_ ; <nl> Frame * frame_ ; <nl> mmm a / src / compiler / x64 / code - generator - x64 . cc <nl> ppp b / src / compiler / x64 / code - generator - x64 . cc <nl> class OutOfLineRecordWrite final : public OutOfLineCode { <nl> } \ <nl> } while ( 0 ) <nl> <nl> + # define ASSEMBLE_COMPARE ( asm_instr ) \ <nl> + do { \ <nl> + if ( AddressingModeField : : decode ( instr - > opcode ( ) ) ! = kMode_None ) { \ <nl> + size_t index = 0 ; \ <nl> + Operand left = i . MemoryOperand ( & index ) ; \ <nl> + if ( HasImmediateInput ( instr , index ) ) { \ <nl> + __ asm_instr ( left , i . InputImmediate ( index ) ) ; \ <nl> + } else { \ <nl> + __ asm_instr ( left , i . InputRegister ( index ) ) ; \ <nl> + } \ <nl> + } else { \ <nl> + if ( HasImmediateInput ( instr , 1 ) ) { \ <nl> + if ( instr - > InputAt ( 0 ) - > IsRegister ( ) ) { \ <nl> + __ asm_instr ( i . InputRegister ( 0 ) , i . InputImmediate ( 1 ) ) ; \ <nl> + } else { \ <nl> + __ asm_instr ( i . InputOperand ( 0 ) , i . InputImmediate ( 1 ) ) ; \ <nl> + } \ <nl> + } else { \ <nl> + if ( instr - > InputAt ( 1 ) - > IsRegister ( ) ) { \ <nl> + __ asm_instr ( i . InputRegister ( 0 ) , i . InputRegister ( 1 ) ) ; \ <nl> + } else { \ <nl> + __ asm_instr ( i . InputRegister ( 0 ) , i . InputOperand ( 1 ) ) ; \ <nl> + } \ <nl> + } \ <nl> + } \ <nl> + } while ( 0 ) <nl> <nl> # define ASSEMBLE_MULT ( asm_instr ) \ <nl> do { \ <nl> void CodeGenerator : : AssembleArchInstruction ( Instruction * instr ) { <nl> ASSEMBLE_BINOP ( andq ) ; <nl> break ; <nl> case kX64Cmp32 : <nl> - ASSEMBLE_BINOP ( cmpl ) ; <nl> + ASSEMBLE_COMPARE ( cmpl ) ; <nl> break ; <nl> case kX64Cmp : <nl> - ASSEMBLE_BINOP ( cmpq ) ; <nl> + ASSEMBLE_COMPARE ( cmpq ) ; <nl> break ; <nl> case kX64Test32 : <nl> - ASSEMBLE_BINOP ( testl ) ; <nl> + ASSEMBLE_COMPARE ( testl ) ; <nl> break ; <nl> case kX64Test : <nl> - ASSEMBLE_BINOP ( testq ) ; <nl> + ASSEMBLE_COMPARE ( testq ) ; <nl> break ; <nl> case kX64Imul32 : <nl> ASSEMBLE_MULT ( imull ) ; <nl> mmm a / src / compiler / x64 / instruction - selector - x64 . cc <nl> ppp b / src / compiler / x64 / instruction - selector - x64 . cc <nl> bool InstructionSelector : : IsTailCallAddressImmediate ( ) { return true ; } <nl> <nl> namespace { <nl> <nl> + void VisitCompareWithMemoryOperand ( InstructionSelector * selector , <nl> + InstructionCode opcode , Node * left , <nl> + InstructionOperand right , <nl> + FlagsContinuation * cont ) { <nl> + DCHECK ( left - > opcode ( ) = = IrOpcode : : kLoad ) ; <nl> + X64OperandGenerator g ( selector ) ; <nl> + size_t input_count = 0 ; <nl> + InstructionOperand inputs [ 6 ] ; <nl> + AddressingMode addressing_mode = <nl> + g . GetEffectiveAddressMemoryOperand ( left , inputs , & input_count ) ; <nl> + opcode | = AddressingModeField : : encode ( addressing_mode ) ; <nl> + opcode = cont - > Encode ( opcode ) ; <nl> + inputs [ input_count + + ] = right ; <nl> + <nl> + if ( cont - > IsBranch ( ) ) { <nl> + inputs [ input_count + + ] = g . Label ( cont - > true_block ( ) ) ; <nl> + inputs [ input_count + + ] = g . 
Label ( cont - > false_block ( ) ) ; <nl> + selector - > Emit ( opcode , 0 , nullptr , input_count , inputs ) ; <nl> + } else { <nl> + DCHECK ( cont - > IsSet ( ) ) ; <nl> + InstructionOperand output = g . DefineAsRegister ( cont - > result ( ) ) ; <nl> + selector - > Emit ( opcode , 1 , & output , input_count , inputs ) ; <nl> + } <nl> + } <nl> + <nl> + / / Determines if { input } of { node } can be replaced by a memory operand . <nl> + bool CanUseMemoryOperand ( InstructionSelector * selector , InstructionCode opcode , <nl> + Node * node , Node * input ) { <nl> + if ( input - > opcode ( ) ! = IrOpcode : : kLoad | | ! selector - > CanCover ( node , input ) ) { <nl> + return false ; <nl> + } <nl> + MachineRepresentation rep = <nl> + LoadRepresentationOf ( input - > op ( ) ) . representation ( ) ; <nl> + if ( rep = = MachineRepresentation : : kWord64 | | <nl> + rep = = MachineRepresentation : : kTagged ) { <nl> + return opcode = = kX64Cmp | | opcode = = kX64Test ; <nl> + } else if ( rep = = MachineRepresentation : : kWord32 ) { <nl> + return opcode = = kX64Cmp32 | | opcode = = kX64Test32 ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> / / Shared routine for multiple compare operations . <nl> void VisitCompare ( InstructionSelector * selector , InstructionCode opcode , <nl> InstructionOperand left , InstructionOperand right , <nl> void VisitCompare ( InstructionSelector * selector , InstructionCode opcode , <nl> VisitCompare ( selector , opcode , g . UseRegister ( left ) , g . Use ( right ) , cont ) ; <nl> } <nl> <nl> - <nl> / / Shared routine for multiple word compare operations . <nl> void VisitWordCompare ( InstructionSelector * selector , Node * node , <nl> InstructionCode opcode , FlagsContinuation * cont ) { <nl> X64OperandGenerator g ( selector ) ; <nl> - Node * const left = node - > InputAt ( 0 ) ; <nl> - Node * const right = node - > InputAt ( 1 ) ; <nl> + Node * left = node - > InputAt ( 0 ) ; <nl> + Node * right = node - > InputAt ( 1 ) ; <nl> + <nl> + / / If one of the two inputs is an immediate , make sure it ' s on the right . <nl> + if ( ! g . CanBeImmediate ( right ) & & g . CanBeImmediate ( left ) ) { <nl> + if ( ! node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) cont - > Commute ( ) ; <nl> + std : : swap ( left , right ) ; <nl> + } <nl> <nl> - / / Match immediates on left or right side of comparison . <nl> + / / Match immediates on right side of comparison . <nl> if ( g . CanBeImmediate ( right ) ) { <nl> - VisitCompare ( selector , opcode , g . Use ( left ) , g . UseImmediate ( right ) , cont ) ; <nl> - } else if ( g . CanBeImmediate ( left ) ) { <nl> + if ( CanUseMemoryOperand ( selector , opcode , node , left ) ) { <nl> + return VisitCompareWithMemoryOperand ( selector , opcode , left , <nl> + g . UseImmediate ( right ) , cont ) ; <nl> + } <nl> + return VisitCompare ( selector , opcode , g . Use ( left ) , g . UseImmediate ( right ) , <nl> + cont ) ; <nl> + } <nl> + <nl> + if ( g . CanBeBetterLeftOperand ( right ) ) { <nl> if ( ! node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) cont - > Commute ( ) ; <nl> - VisitCompare ( selector , opcode , g . Use ( right ) , g . 
UseImmediate ( left ) , cont ) ; <nl> - } else { <nl> - VisitCompare ( selector , opcode , left , right , cont , <nl> - node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) ; <nl> + std : : swap ( left , right ) ; <nl> } <nl> - } <nl> <nl> + if ( CanUseMemoryOperand ( selector , opcode , node , left ) ) { <nl> + return VisitCompareWithMemoryOperand ( selector , opcode , left , <nl> + g . UseRegister ( right ) , cont ) ; <nl> + } <nl> + return VisitCompare ( selector , opcode , left , right , cont , <nl> + node - > op ( ) - > HasProperty ( Operator : : kCommutative ) ) ; <nl> + } <nl> <nl> / / Shared routine for 64 - bit word comparison operations . <nl> void VisitWord64Compare ( InstructionSelector * selector , Node * node , <nl> mmm a / src / ia32 / assembler - ia32 . cc <nl> ppp b / src / ia32 / assembler - ia32 . cc <nl> void Assembler : : cmp ( Register reg , const Operand & op ) { <nl> emit_operand ( reg , op ) ; <nl> } <nl> <nl> + void Assembler : : cmp ( const Operand & op , Register reg ) { <nl> + EnsureSpace ensure_space ( this ) ; <nl> + EMIT ( 0x39 ) ; <nl> + emit_operand ( reg , op ) ; <nl> + } <nl> <nl> void Assembler : : cmp ( const Operand & op , const Immediate & imm ) { <nl> EnsureSpace ensure_space ( this ) ; <nl> mmm a / src / ia32 / assembler - ia32 . h <nl> ppp b / src / ia32 / assembler - ia32 . h <nl> class Assembler : public AssemblerBase { <nl> void cmp ( Register reg0 , Register reg1 ) { cmp ( reg0 , Operand ( reg1 ) ) ; } <nl> void cmp ( Register reg , const Operand & op ) ; <nl> void cmp ( Register reg , const Immediate & imm ) { cmp ( Operand ( reg ) , imm ) ; } <nl> + void cmp ( const Operand & op , Register reg ) ; <nl> void cmp ( const Operand & op , const Immediate & imm ) ; <nl> void cmp ( const Operand & op , Handle < Object > handle ) ; <nl> <nl> mmm a / src / ia32 / disasm - ia32 . cc <nl> ppp b / src / ia32 / disasm - ia32 . 
cc <nl> struct ByteMnemonic { <nl> OperandOrder op_order_ ; <nl> } ; <nl> <nl> - <nl> static const ByteMnemonic two_operands_instr [ ] = { <nl> - { 0x01 , " add " , OPER_REG_OP_ORDER } , <nl> - { 0x03 , " add " , REG_OPER_OP_ORDER } , <nl> - { 0x09 , " or " , OPER_REG_OP_ORDER } , <nl> - { 0x0B , " or " , REG_OPER_OP_ORDER } , <nl> - { 0x1B , " sbb " , REG_OPER_OP_ORDER } , <nl> - { 0x21 , " and " , OPER_REG_OP_ORDER } , <nl> - { 0x23 , " and " , REG_OPER_OP_ORDER } , <nl> - { 0x29 , " sub " , OPER_REG_OP_ORDER } , <nl> - { 0x2A , " subb " , REG_OPER_OP_ORDER } , <nl> - { 0x2B , " sub " , REG_OPER_OP_ORDER } , <nl> - { 0x31 , " xor " , OPER_REG_OP_ORDER } , <nl> - { 0x33 , " xor " , REG_OPER_OP_ORDER } , <nl> - { 0x38 , " cmpb " , OPER_REG_OP_ORDER } , <nl> - { 0x3A , " cmpb " , REG_OPER_OP_ORDER } , <nl> - { 0x3B , " cmp " , REG_OPER_OP_ORDER } , <nl> - { 0x84 , " test_b " , REG_OPER_OP_ORDER } , <nl> - { 0x85 , " test " , REG_OPER_OP_ORDER } , <nl> - { 0x87 , " xchg " , REG_OPER_OP_ORDER } , <nl> - { 0x8A , " mov_b " , REG_OPER_OP_ORDER } , <nl> - { 0x8B , " mov " , REG_OPER_OP_ORDER } , <nl> - { 0x8D , " lea " , REG_OPER_OP_ORDER } , <nl> - { - 1 , " " , UNSET_OP_ORDER } <nl> - } ; <nl> - <nl> + { 0x01 , " add " , OPER_REG_OP_ORDER } , <nl> + { 0x03 , " add " , REG_OPER_OP_ORDER } , <nl> + { 0x09 , " or " , OPER_REG_OP_ORDER } , <nl> + { 0x0B , " or " , REG_OPER_OP_ORDER } , <nl> + { 0x1B , " sbb " , REG_OPER_OP_ORDER } , <nl> + { 0x21 , " and " , OPER_REG_OP_ORDER } , <nl> + { 0x23 , " and " , REG_OPER_OP_ORDER } , <nl> + { 0x29 , " sub " , OPER_REG_OP_ORDER } , <nl> + { 0x2A , " subb " , REG_OPER_OP_ORDER } , <nl> + { 0x2B , " sub " , REG_OPER_OP_ORDER } , <nl> + { 0x31 , " xor " , OPER_REG_OP_ORDER } , <nl> + { 0x33 , " xor " , REG_OPER_OP_ORDER } , <nl> + { 0x38 , " cmpb " , OPER_REG_OP_ORDER } , <nl> + { 0x39 , " cmp " , OPER_REG_OP_ORDER } , <nl> + { 0x3A , " cmpb " , REG_OPER_OP_ORDER } , <nl> + { 0x3B , " cmp " , REG_OPER_OP_ORDER } , <nl> + { 0x84 , " test_b " , REG_OPER_OP_ORDER } , <nl> + { 0x85 , " test " , REG_OPER_OP_ORDER } , <nl> + { 0x87 , " xchg " , REG_OPER_OP_ORDER } , <nl> + { 0x8A , " mov_b " , REG_OPER_OP_ORDER } , <nl> + { 0x8B , " mov " , REG_OPER_OP_ORDER } , <nl> + { 0x8D , " lea " , REG_OPER_OP_ORDER } , <nl> + { - 1 , " " , UNSET_OP_ORDER } } ; <nl> <nl> static const ByteMnemonic zero_operands_instr [ ] = { <nl> { 0xC3 , " ret " , UNSET_OP_ORDER } , <nl>
Emit memory operands for cmp and test on ia32 and x64 when it makes sense .
v8/v8
0e43ff5632d38cfb8b0ea0bc6955d6f252cb9ad8
2016-02-22T09:46:21Z
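The v8 commit above introduces per-node effect levels so a load can only be folded into a cmp/test memory operand when no store or call intervenes. A compact C++ model of that bookkeeping, with invented stand-ins for v8's IR node types:

#include <iostream>
#include <vector>

enum class Op { Load, Store, Call, Compare, Other };
struct Node { Op op; int effect_level = 0; };

// Mirrors VisitBlock: bump the level at each node that clobbers memory.
void assignEffectLevels(std::vector<Node>& block) {
    int level = 0;
    for (Node& n : block) {
        if (n.op == Op::Store || n.op == Op::Call) ++level;
        n.effect_level = level;
    }
}

// Mirrors the CanCover change for impure nodes: same effect level required.
bool canFoldLoad(const Node& load, const Node& user) {
    return load.op == Op::Load && load.effect_level == user.effect_level;
}

int main() {
    std::vector<Node> block{{Op::Load}, {Op::Store}, {Op::Compare}};
    assignEffectLevels(block);
    std::cout << canFoldLoad(block[0], block[2]) << "\n";  // 0: store intervenes
}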
mmm a / admin / static / coffee / app . coffee <nl> ppp b / admin / static / coffee / app . coffee <nl> clear_modals = - > <nl> register_modal = ( modal ) - > modal_registry . push ( modal ) <nl> <nl> # TODO Just for development , CHANGE IT BACK TO 5000 <nl> - updateInterval = 50000 <nl> + updateInterval = 5000 <nl> statUpdateInterval = 1000 <nl> <nl> declare_client_connected = - > <nl> mmm a / admin / static / coffee / namespaces / replicas . coffee <nl> ppp b / admin / static / coffee / namespaces / replicas . coffee <nl> module ' NamespaceView ' , - > <nl> <nl> modify_replicas : ( event ) = > <nl> event . preventDefault ( ) <nl> - datacenter_id = @ . $ ( event . target ) . data ' id ' # We do not let people change the number of replicas of Universe . <nl> - datacenter = datacenters . get datacenter_id <nl> + datacenter_id = @ . $ ( event . target ) . data ' id ' <nl> + if datacenter_id is universe_datacenter . get ( ' id ' ) <nl> + datacenter = universe_datacenter <nl> + else <nl> + datacenter = datacenters . get datacenter_id <nl> modal = new NamespaceView . ModifyReplicasModal @ model , datacenter <nl> modal . render ( ) <nl> <nl> module ' NamespaceView ' , - > <nl> new_affinities = { } <nl> if @ model . get ( ' primary_uuid ' ) is universe_datacenter . get ( ' id ' ) <nl> old_dc = universe_datacenter <nl> - new_affinities [ old_dc . get ( ' id ' ) ] = 0 <nl> else <nl> old_dc = datacenters . get ( @ model . get ( ' primary_uuid ' ) ) <nl> - new_affinities [ old_dc . get ( ' id ' ) ] = DataUtils . get_replica_affinities ( @ model . get ( ' id ' ) , old_dc . get ( ' id ' ) ) + 1 <nl> + new_affinities [ old_dc . get ( ' id ' ) ] = DataUtils . get_replica_affinities ( @ model . get ( ' id ' ) , old_dc . get ( ' id ' ) ) + 1 <nl> <nl> new_affinities [ new_dc . get ( ' id ' ) ] = DataUtils . get_replica_affinities ( @ model . get ( ' id ' ) , new_dc . get ( ' id ' ) ) - 1 <nl> primary_pinnings = { } <nl> module ' NamespaceView ' , - > <nl> render : = > <nl> found_universe = false <nl> data = @ model . toJSON ( ) <nl> + if not @ model . get ( ' primary_uuid ' ) ? <nl> + primary_replica_count = 0 <nl> + else <nl> + primary_replica_count = @ model . get ( ' replica_affinities ' ) [ @ model . get ( ' primary_uuid ' ) ] <nl> + if not primary_replica_count ? <nl> + primary_replica_count = 0 <nl> if @ model . get ( ' primary_uuid ' ) is universe_datacenter . get ( ' id ' ) <nl> found_universe = true <nl> data = _ . extend data , <nl> primary : <nl> id : @ model . get ( ' primary_uuid ' ) <nl> name : universe_datacenter . get ( ' name ' ) <nl> - replicas : 1 # we ' re adding one because primary is also a replica <nl> + replicas : primary_replica_count + 1 # we ' re adding one because primary is also a replica <nl> total_machines : machines . length <nl> acks : DataUtils . get_ack_expectations ( @ model . get ( ' id ' ) , @ model . get ( ' primary_uuid ' ) ) <nl> - is_universe : true <nl> # status : DataUtils . get_namespace_status ( @ model . get ( ' id ' ) , @ model . get ( ' primary_uuid ' ) ) <nl> - <nl> else <nl> - if not @ model . get ( ' primary_uuid ' ) ? <nl> - primary_replica_count = 0 <nl> - else <nl> - primary_replica_count = @ model . get ( ' replica_affinities ' ) [ @ model . get ( ' primary_uuid ' ) ] <nl> - if not primary_replica_count ? <nl> - primary_replica_count = 0 <nl> - <nl> data = _ . extend data , <nl> primary : <nl> id : @ model . 
get ( ' primary_uuid ' ) <nl> module ' NamespaceView ' , - > <nl> log_initial ' ( initializing ) modal dialog : modify replicas ' <nl> @ namespace = namespace <nl> @ datacenter = datacenter <nl> - @ total_machines = DataUtils . get_datacenter_machines ( @ datacenter . get ( ' id ' ) ) . length <nl> + <nl> + if @ datacenter is universe_datacenter <nl> + @ max_machines = machines . length <nl> + for datacenter_id of @ namespace . get ( ' replica_affinities ' ) <nl> + if datacenter_id isnt universe_datacenter . get ( ' id ' ) <nl> + @ max_machines - = @ namespace . get ( ' replica_affinities ' ) [ datacenter_id ] <nl> + if @ namespace . get ( ' primary_uuid ' ) isnt universe_datacenter . get ( ' id ' ) <nl> + @ max_machines - = 1 <nl> + else <nl> + @ max_machines = machines . length <nl> + for datacenter_id of @ namespace . get ( ' replica_affinities ' ) <nl> + if datacenter_id isnt @ datacenter . get ( ' id ' ) <nl> + @ max_machines - = @ namespace . get ( ' replica_affinities ' ) [ datacenter_id ] <nl> + if @ namespace . get ( ' primary_uuid ' ) isnt @ datacenter . get ( ' id ' ) <nl> + @ max_machines - = 1 <nl> + <nl> + @ need_explanation = @ max_machines > DataUtils . get_datacenter_machines ( @ datacenter . get ( ' id ' ) ) . length <nl> + @ max_machines = Math . min @ max_machines , DataUtils . get_datacenter_machines ( @ datacenter . get ( ' id ' ) ) . length <nl> + <nl> @ nreplicas = @ adjustReplicaCount ( DataUtils . get_replica_affinities ( @ namespace . get ( ' id ' ) , @ datacenter . id ) , true ) <nl> @ nacks = DataUtils . get_ack_expectations ( @ namespace . get ( ' id ' ) , @ datacenter . get ( ' id ' ) ) <nl> super <nl> module ' NamespaceView ' , - > <nl> return numreplicas <nl> <nl> compute_json : - > <nl> + <nl> json = <nl> namespace : @ namespace . toJSON ( ) <nl> datacenter : @ datacenter . toJSON ( ) <nl> num_replicas : @ nreplicas <nl> - total_machines : @ total_machines <nl> + max_machines : @ max_machines <nl> num_acks : @ nacks <nl> modal_title : ' Modify replica settings ' <nl> btn_primary_text : ' Commit ' <nl> + need_explanation : @ need_explanation if @ need_explanation ? <nl> return json <nl> <nl> render_inner : ( error_msg , nreplicas_input , nacks_input ) - > <nl> module ' NamespaceView ' , - > <nl> nacks_input = parseInt ( formdata . num_acks ) <nl> <nl> msg_error = [ ] <nl> - if nreplicas_input > @ total_machines <nl> - msg_error . push ( ' The number of replicas ( ' + nreplicas_input + ' ) cannot exceed the total number of machines ( ' + @ total_machines + ' ) . ' ) <nl> + if nreplicas_input > @ max_machines <nl> + msg_error . push ( ' The number of replicas ( ' + nreplicas_input + ' ) cannot exceed the total number of machines ( ' + @ max_machines + ' ) . ' ) <nl> if nreplicas_input is 0 and @ namespace . get ( ' primary_uuid ' ) is @ datacenter . get ( ' id ' ) <nl> msg_error . push ( ' The number of replicas must be at least one because ' + @ datacenter . get ( ' name ' ) + ' is the primary datacenter for this namespace . ' ) <nl> if nacks_input > nreplicas_input <nl> module ' NamespaceView ' , - > <nl> @ old_acks = DataUtils . get_ack_expectations ( @ namespace . get ( ' id ' ) , @ datacenter . id ) <nl> @ modified_acks = @ nacks isnt @ old_acks <nl> @ datacenter_uuid = formdata . datacenter <nl> - @ datacenter_name = datacenters . get ( @ datacenter_uuid ) . get ( ' name ' ) <nl> + if @ datacenter_uuid is universe_datacenter . get ( ' id ' ) <nl> + @ datacenter_name = universe_datacenter . 
get ( ' name ' ) <nl> + else <nl> + @ datacenter_name = datacenters . get ( @ datacenter_uuid ) . get ( ' name ' ) <nl> <nl> $ . ajax <nl> processData : false <nl> mmm a / admin / static / coffee / namespaces / replicas . html <nl> ppp b / admin / static / coffee / namespaces / replicas . html <nl> < h3 class = " title " > Replication settings < / h3 > <nl> { { / if } } <nl> < / td > <nl> < td > <nl> - { { # unless is_universe } } <nl> < a class = " edit_replicas " href = " # " data - id = " { { id } } " > Edit replicas < / a > <nl> - { { / unless } } <nl> < / td > <nl> < / tr > <nl> { { / with } } <nl> < h3 class = " title " > Replication settings < / h3 > <nl> { { / with } } <nl> < / td > <nl> < td > <nl> - { { # unless is_universe } } <nl> < a class = " edit_replicas " href = " # " data - id = " { { id } } " > Edit replicas < / a > | <nl> - { { / unless } } <nl> < a class = " make_primary " href = " # " data - id = " { { id } } " > Make primary < / a > <nl> < / td > <nl> <nl> < h3 class = " title " > Replication settings < / h3 > <nl> < td > None < / td > <nl> < td colspan = " 3 " > N / A < / td > <nl> < td > <nl> - { { # unless is_universe } } <nl> < a class = " edit_replicas " href = " # " data - id = " { { id } } " > Edit replicas < / a > <nl> - { { / unless } } <nl> < / td > <nl> < / tr > <nl> { { / each } } <nl> < h4 > Number of replicas : < / h4 > <nl> < div class = " input " > <nl> < input class = " xlarge " id = " focus_num_replicas " name = " num_replicas " type = " text " value = " { { # if nreplicas_input } } { { nreplicas_input } } { { else } } { { num_replicas } } { { / if } } " / > <nl> < p class = " current " > Current replicas : { { num_replicas } } < / p > <nl> - < p class = " max " > Max replicas : { { total_machines } } < / p > <nl> + < p class = " max " > Max replicas : { { max_machines } } { { # if need_explanation } } The maximum of machines you can set is less than the number of machines in the datacenter because you set a high number of replicas in Universe { { / if } } < / p > <nl> < / div > <nl> < / div > <nl> < hr / > <nl>
Fix max replicas for Universe and others
rethinkdb/rethinkdb
f501401fb97607250da1dc12a341510e9a1570c4
2012-10-02T00:27:34Z
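For readers skimming the CoffeeScript above: the commit derives the largest replica count a datacenter can accept by subtracting replicas pinned to other datacenters (and the primary, when it lives elsewhere) from the cluster-wide machine count, then capping the result at the datacenter's own size. Below is a minimal Python sketch of that rule; the function and argument names are hypothetical, not RethinkDB API.

# Hypothetical restatement of the max-machines rule from the commit above.
def max_replicas_for(dc_id, all_machines, replica_affinities, primary_dc_id, dc_machines):
    available = all_machines
    for other_dc, replicas in replica_affinities.items():
        if other_dc != dc_id:
            available -= replicas       # machines already promised to other datacenters
    if primary_dc_id != dc_id:
        available -= 1                  # the primary occupies one machine as well
    return min(available, dc_machines)  # never offer more than the datacenter holds

# 10 machines total, 3 replicas pinned elsewhere, primary hosted elsewhere,
# 5 machines in this datacenter: min(10 - 3 - 1, 5) == 5
print(max_replicas_for("dc1", 10, {"dc2": 3}, "dc2", 5))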
mmm a / dbms / tests / clickhouse - test <nl> ppp b / dbms / tests / clickhouse - test <nl> def main ( args ) : <nl> all_tests = filter ( lambda case : is_test_from_dir ( suite_dir , case ) , all_tests ) <nl> if args . test : <nl> all_tests = [ t for t in all_tests if any ( [ re . search ( r , t ) for r in args . test ] ) ] <nl> + all_tests . sort ( key = key_func ) <nl> <nl> run_n , run_total = args . parallel . split ( ' / ' ) <nl> run_n = float ( run_n ) <nl>
Fix sorting in clickhouse - test .
ClickHouse/ClickHouse
1965c612abc5e16719952f23ad5407e91471ad29
2019-10-09T11:56:51Z
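The added sort matters because, a few lines below it, clickhouse-test splits all_tests into run_n / run_total chunks for parallel execution; without a deterministic order, concurrent runners could slice differently ordered lists and run overlapping or missing tests. key_func is defined elsewhere in the script, so the stand-in below is only a plausible sketch of a numeric-prefix sort:

# Plausible stand-in for key_func (the real one lives elsewhere in clickhouse-test):
# order by the numeric test prefix so every runner sees the same list before slicing.
import re

def key_func(test_name):
    m = re.match(r"(\d+)", test_name)
    return (int(m.group(1)) if m else 0, test_name)

all_tests = ["00010_b.sql", "00002_a.sql", "00002_b.sql"]
all_tests.sort(key=key_func)
print(all_tests)  # ['00002_a.sql', '00002_b.sql', '00010_b.sql']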
mmm a / tests / cpp - tests / Classes / LayerTest / LayerTest . cpp <nl> ppp b / tests / cpp - tests / Classes / LayerTest / LayerTest . cpp <nl> LayerTests : : LayerTests ( ) <nl> ADD_TEST_CASE ( LayerExtendedBlendOpacityTest ) ; <nl> ADD_TEST_CASE ( LayerBug3162A ) ; <nl> ADD_TEST_CASE ( LayerBug3162B ) ; <nl> - / / ADD_TEST_CASE ( LayerColorOccludeBug ) ; / / TODO crash on iPhone X <nl> - / / ADD_TEST_CASE ( LayerRadialGradientTest ) ; <nl> + ADD_TEST_CASE ( LayerColorOccludeBug ) ; <nl> + ADD_TEST_CASE ( LayerRadialGradientTest ) ; <nl> } <nl> <nl> / / Cascading support extensions <nl>
add test cases ( LayerColorOccludeBug , LayerRadialGradientTest )
cocos2d/cocos2d-x
254c57a3471b2d4fe54f2b91351baf40c5407be8
2019-05-22T02:00:05Z
mmm a / swoole_coroutine_util . cc <nl> ppp b / swoole_coroutine_util . cc <nl> static zend_class_entry * swoole_coroutine_iterator_class_entry_ptr ; <nl> static zend_class_entry swoole_exit_exception_ce ; <nl> static zend_class_entry * swoole_exit_exception_class_entry_ptr ; <nl> <nl> + BEGIN_EXTERN_C ( ) <nl> extern int swoole_coroutine_statvfs ( const char * path , struct statvfs * buf ) ; <nl> + END_EXTERN_C ( ) <nl> <nl> static const zend_function_entry swoole_coroutine_util_methods [ ] = <nl> { <nl> mmm a / swoole_redis_server . cc <nl> ppp b / swoole_redis_server . cc <nl> <nl> # ifdef SW_COROUTINE <nl> # include " swoole_coroutine . h " <nl> # endif <nl> + BEGIN_EXTERN_C ( ) <nl> # include " ext / standard / php_string . h " <nl> + END_EXTERN_C ( ) <nl> <nl> static zend_class_entry swoole_redis_server_ce ; <nl> static zend_class_entry * swoole_redis_server_class_entry_ptr ; <nl>
Add extern C .
swoole/swoole-src
a185743cbf26f317499f2880a4872a55774df857
2018-11-22T10:44:58Z
mmm a / app / win / atom . rc <nl> ppp b / app / win / atom . rc <nl> END <nl> / / <nl> <nl> VS_VERSION_INFO VERSIONINFO <nl> - FILEVERSION 0 , 7 , 0 , 0 <nl> - PRODUCTVERSION 0 , 7 , 0 , 0 <nl> + FILEVERSION 0 , 7 , 1 , 0 <nl> + PRODUCTVERSION 0 , 7 , 1 , 0 <nl> FILEFLAGSMASK 0x3fL <nl> # ifdef _DEBUG <nl> FILEFLAGS 0x1L <nl> BEGIN <nl> BEGIN <nl> VALUE " CompanyName " , " GitHub , Inc . " <nl> VALUE " FileDescription " , " Atom - Shell " <nl> - VALUE " FileVersion " , " 0 . 7 . 0 " <nl> + VALUE " FileVersion " , " 0 . 7 . 1 " <nl> VALUE " InternalName " , " atom . exe " <nl> VALUE " LegalCopyright " , " Copyright ( C ) 2013 GitHub , Inc . All rights reserved . " <nl> VALUE " OriginalFilename " , " atom . exe " <nl> VALUE " ProductName " , " Atom - Shell " <nl> - VALUE " ProductVersion " , " 0 . 7 . 0 " <nl> + VALUE " ProductVersion " , " 0 . 7 . 1 " <nl> END <nl> END <nl> BLOCK " VarFileInfo " <nl> mmm a / browser / mac / Info . plist <nl> ppp b / browser / mac / Info . plist <nl> <nl> < key > CFBundleIconFile < / key > <nl> < string > atom . icns < / string > <nl> < key > CFBundleVersion < / key > <nl> - < string > 0 . 7 . 0 < / string > <nl> + < string > 0 . 7 . 1 < / string > <nl> < key > NSMainNibFile < / key > <nl> < string > MainMenu < / string > <nl> < key > NSPrincipalClass < / key > <nl> mmm a / common / atom_version . h <nl> ppp b / common / atom_version . h <nl> <nl> <nl> # define ATOM_MAJOR_VERSION 0 <nl> # define ATOM_MINOR_VERSION 7 <nl> - # define ATOM_PATCH_VERSION 0 <nl> + # define ATOM_PATCH_VERSION 1 <nl> <nl> # define ATOM_VERSION_IS_RELEASE 1 <nl> <nl> mmm a / package . json <nl> ppp b / package . json <nl> <nl> { <nl> " name " : " atom - shell " , <nl> - " version " : " 0 . 7 . 0 " , <nl> + " version " : " 0 . 7 . 1 " , <nl> <nl> " devDependencies " : { <nl> " coffee - script " : " ~ 1 . 6 . 3 " , <nl>
Bump v0 . 7 . 1 .
electron/electron
da03784610aba44bfca28691adef4d9cd0056623
2013-11-23T09:09:42Z
mmm a / src / objects . cc <nl> ppp b / src / objects . cc <nl> MaybeObject * JSObject : : SetLocalPropertyIgnoreAttributes ( <nl> Representation representation = lookup . representation ( ) ; <nl> Representation value_representation = <nl> value - > OptimalRepresentation ( value_type ) ; <nl> + if ( value_representation . IsNone ( ) ) break ; <nl> if ( ! value_representation . fits_into ( representation ) ) { <nl> MaybeObject * maybe_failure = self - > GeneralizeFieldRepresentation ( <nl> lookup . GetDescriptorIndex ( ) , value_representation ) ; <nl> new file mode 100644 <nl> index 00000000000 . . 4f8f7915b1c <nl> mmm / dev / null <nl> ppp b / test / mjsunit / regress / regress - 2717 . js <nl> <nl> + / / Copyright 2013 the V8 project authors . All rights reserved . <nl> + / / Redistribution and use in source and binary forms , with or without <nl> + / / modification , are permitted provided that the following conditions are <nl> + / / met : <nl> + / / <nl> + / / * Redistributions of source code must retain the above copyright <nl> + / / notice , this list of conditions and the following disclaimer . <nl> + / / * Redistributions in binary form must reproduce the above <nl> + / / copyright notice , this list of conditions and the following <nl> + / / disclaimer in the documentation and / or other materials provided <nl> + / / with the distribution . <nl> + / / * Neither the name of Google Inc . nor the names of its <nl> + / / contributors may be used to endorse or promote products derived <nl> + / / from this software without specific prior written permission . <nl> + / / <nl> + / / THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS <nl> + / / " AS IS " AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT <nl> + / / LIMITED TO , THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR <nl> + / / A PARTICULAR PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE COPYRIGHT <nl> + / / OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , <nl> + / / SPECIAL , EXEMPLARY , OR CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT <nl> + / / LIMITED TO , PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , <nl> + / / DATA , OR PROFITS ; OR BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY <nl> + / / THEORY OF LIABILITY , WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT <nl> + / / ( INCLUDING NEGLIGENCE OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE <nl> + / / OF THIS SOFTWARE , EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE . <nl> + <nl> + / / Test re - initializing existing field which is already being tracked as <nl> + / / having double representation . <nl> + ( function ( ) { <nl> + function test1 ( a ) { <nl> + return { x : 1 . 5 , x : a } ; <nl> + } ; <nl> + <nl> + assertEquals ( { } , test1 ( { } ) . x ) ; <nl> + } ) ( ) ; <nl> + <nl> + / / Test initializing new field which follows an existing transition to a <nl> + / / map that tracks it as having double representation . <nl> + ( function ( ) { <nl> + function test1 ( a ) { <nl> + return { y : a } ; <nl> + } ; <nl> + <nl> + function test2 ( a ) { <nl> + return { y : a } ; <nl> + } ; <nl> + <nl> + assertEquals ( 1 . 5 , test1 ( 1 . 5 ) . y ) ; <nl> + assertEquals ( { } , test2 ( { } ) . y ) ; <nl> + } ) ( ) ; <nl>
Fix re - initialization of existing double field .
v8/v8
ecc41e30c08cdfe172fe2c182fe5e58701a1b912
2013-06-10T11:55:47Z
mmm a / admin / static / coffee / modals . coffee <nl> ppp b / admin / static / coffee / modals . coffee <nl> module ' Modals ' , - > <nl> super <nl> modal_title : " Add database " <nl> btn_primary_text : " Add " <nl> - @ . $ ( ' . focus_new_name ' ) . focus ( ) <nl> + @ $ ( ' . focus_new_name ' ) . focus ( ) <nl> <nl> on_submit : = > <nl> super <nl> module ' Modals ' , - > <nl> <nl> render : = > <nl> ordered_databases = @ databases . map ( d ) - > <nl> - id : d . get ( ' db ' ) # TODO Fix when API is available <nl> - db : d . get ( ' db ' ) <nl> + name : d . get ( ' name ' ) <nl> ordered_databases = _ . sortBy ordered_databases , ( d ) - > d . name <nl> <nl> super <nl> module ' Modals ' , - > <nl> template_error = { } <nl> input_error = false <nl> <nl> - # Need a name <nl> - if @ formdata . name is ' ' <nl> + <nl> + <nl> + <nl> + if @ formdata . name is ' ' # Need a name <nl> input_error = true <nl> template_error . namespace_is_empty = true <nl> else if / ^ [ a - zA - Z0 - 9_ ] + $ / . test ( @ formdata . name ) is false <nl> module ' Modals ' , - > <nl> input_error = true <nl> template_error . no_database = true <nl> else # And a name that doesn ' t exist <nl> - db = @ databases . get ( @ formdata . database ) <nl> - for table in db . get ( ' tables ' ) <nl> + database_used = null <nl> + for database in @ databases . models <nl> + if database . get ( ' name ' ) is @ formdata . database <nl> + database_used = database <nl> + break <nl> + <nl> + for table in database_used . get ( ' tables ' ) <nl> if table . name is @ formdata . name <nl> input_error = true <nl> template_error . namespace_exists = true <nl> module ' Modals ' , - > <nl> template : Handlebars . templates [ ' remove_namespace - modal - template ' ] <nl> class : ' remove - namespace - dialog ' <nl> <nl> - initialize : - > <nl> - log_initial ' ( initializing ) modal dialog : remove namespace ' <nl> - super <nl> - <nl> render : ( tables_to_delete ) = > <nl> @ tables_to_delete = tables_to_delete <nl> <nl> super <nl> modal_title : ' Delete tables ' <nl> btn_primary_text : ' Delete ' <nl> - namespaces : tables_to_delete <nl> + tables : tables_to_delete <nl> + single_delete : tables_to_delete . length is 1 <nl> <nl> - @ . $ ( ' . btn - primary ' ) . focus ( ) <nl> + @ $ ( ' . btn - primary ' ) . focus ( ) <nl> <nl> - on_submit : - > <nl> + on_submit : = > <nl> super <nl> <nl> query = r . expr ( @ tables_to_delete ) . forEach ( table ) - > <nl> module ' Modals ' , - > <nl> @ on_error ( new Error ( " The value returned for ` dropped ` did not match the number of tables . " ) ) <nl> <nl> <nl> - on_success : ( response ) - > <nl> + on_success : ( response ) = > <nl> super <nl> <nl> # Build feedback message <nl> module ' Modals ' , - > <nl> if index < @ tables_to_delete . length - 1 <nl> message + = " , " <nl> if @ tables_to_delete . length is 1 <nl> - message + = + " was " <nl> + message + = " was " <nl> else <nl> message + = " were " <nl> message + = " successfully deleted . " <nl> <nl> + if Backbone . history . fragment isnt ' tables ' <nl> + window . router . navigate ' # tables ' , { trigger : true } <nl> window . app . current_view . render_message message <nl> + @ remove ( ) <nl> mmm a / admin / static / coffee / tables / index . coffee <nl> ppp b / admin / static / coffee / tables / index . coffee <nl> module ' TablesView ' , - > <nl> query = r . db ( system_db ) . table ( ' db_config ' ) . filter ( ( db ) - > <nl> db ( " name " ) . ne ( ' rethinkdb ' ) <nl> ) . orderBy ( r . row ) .
map ( db ) - > <nl> - db : db ( " name " ) <nl> + name : db ( " name " ) <nl> id : db ( " uuid " ) <nl> tables : r . db ( system_db ) . table ( ' table_status ' ) . orderBy ( ( table ) - > table ( " name " ) ) <nl> . filter ( { db : db ( " name " ) } ) . merge ( ( table ) - > <nl> module ' TablesView ' , - > <nl> # TODO <nl> console . log error <nl> else <nl> + console . log JSON . stringify ( result , null , 2 ) <nl> @ loading = false # TODO Move that outside the ` if ` statement ? <nl> databases = { } <nl> for database , index in result <nl> module ' TablesView ' , - > <nl> render : = > <nl> @ $ el . html @ template @ model . toJSON ( ) <nl> @ <nl> - <nl> - <nl> - module ' NamespacesView ' , - > <nl> - # Show a list of databases <nl> - class @ DatabaseList extends UIComponents . AbstractList <nl> - # Use a namespace - specific template for the namespace list <nl> - template : Handlebars . templates [ ' database_list - template ' ] <nl> - className : ' databases_list - container ' <nl> - alert_message_template : Handlebars . templates [ ' alert_message - template ' ] <nl> - <nl> - events : <nl> - ' click . add - database ' : ' add_database ' <nl> - ' click . add - namespace ' : ' add_namespace ' <nl> - ' click . remove - namespace ' : ' remove_namespace ' <nl> - ' click . close ' : ' remove_parent_alert ' <nl> - <nl> - initialize : - > <nl> - log_initial ' ( initializing ) namespace list view ' <nl> - <nl> - @ add_database_dialog = new NamespaceView . AddDatabaseModal <nl> - @ add_namespace_dialog = new NamespaceView . AddNamespaceModal <nl> - @ remove_namespace_dialog = new NamespaceView . RemoveNamespaceModal <nl> - <nl> - super databases , NamespaceView . DatabaseListElement , ' . collapsible - list ' , <nl> - sort : ( a , b ) - > <nl> - if b . model . get ( ' name ' ) < a . model . get ( ' name ' ) <nl> - return 1 <nl> - else if b . model . get ( ' name ' ) > a . model . get ( ' name ' ) <nl> - return - 1 <nl> - return 0 <nl> - , ' database ' , ' cluster ' <nl> - <nl> - @ datacenters_length = - 1 <nl> - @ databases_length = - 1 <nl> - datacenters . on ' all ' , @ update_button_create_namespace <nl> - databases . on ' all ' , @ update_button_create_namespace <nl> - @ can_create_namespace = true <nl> - <nl> - update_button_create_namespace : = > <nl> - if databases . length is 0 and @ can_create_namespace is true <nl> - @ . $ ( ' . add - namespace ' ) . prop ' disabled ' , true <nl> - @ . $ ( ' . user_alert_space - cannot_create_namespace ' ) . show ( ) <nl> - else if databases . length > 0 and @ can_create_namespace is false <nl> - @ . $ ( ' . add - namespace ' ) . prop ' disabled ' , false <nl> - @ . $ ( ' . user_alert_space - cannot_create_namespace ' ) . hide ( ) <nl> - <nl> - render : ( message ) = > <nl> - super <nl> - @ update_toolbar_buttons ( ) <nl> - <nl> - if message ? <nl> - @ . $ ( ' # user - alert - space ' ) . append @ alert_message_template <nl> - message : message <nl> - @ update_button_create_namespace ( ) <nl> - return @ <nl> - <nl> - remove_parent_alert : ( event ) - > <nl> - event . preventDefault ( ) <nl> - element = $ ( event . target ) . parent ( ) <nl> - element . slideUp ' fast ' , - > element . remove ( ) <nl> - <nl> - rename_namespace : ( event ) - > <nl> - event . preventDefault ( ) <nl> - rename_modal = new UIComponents . RenameItemModal @ model . get ( ' id ' ) , ' namespace ' <nl> - rename_modal . render ( ) <nl> - <nl> - add_database : ( event ) = > <nl> - @ add_database_dialog . 
render @ databases <nl> - <nl> - add_namespace : ( event ) = > <nl> - event . preventDefault ( ) <nl> - @ add_namespace_dialog . render ( ) <nl> - $ ( ' # focus_namespace_name ' ) . focus ( ) <nl> - <nl> - remove_namespace : ( event ) = > <nl> - log_action ' remove namespace button clicked ' <nl> - # Make sure the button isn ' t disabled , and pass the list of namespace UUIDs selected <nl> - if not $ ( event . currentTarget ) . is ' : disabled ' <nl> - @ remove_namespace_dialog . render @ get_selected_namespaces ( ) <nl> - event . preventDefault ( ) <nl> - <nl> - # Extend the AbstractList . add_element method to bind a callback to each namespace added to the list <nl> - add_element : ( element ) = > <nl> - namespaces_list_element = super element <nl> - namespaces_list_element . register_namespace_callback [ @ update_toolbar_buttons ] <nl> - # TODO destroy this listener <nl> - <nl> - # Count up the number of namespaces checked off across all machine lists <nl> - get_selected_namespaces : = > <nl> - namespaces_lists = _ . map @ element_views , ( database_list_element ) - > <nl> - database_list_element . namespace_list <nl> - <nl> - selected_namespaces = [ ] <nl> - for namespaces_list in namespaces_lists <nl> - selected_namespaces = selected_namespaces . concat namespaces_list . get_selected_elements ( ) <nl> - <nl> - return selected_namespaces <nl> - <nl> - <nl> - # Callback that will be registered : updates the toolbar buttons based on how many namespaces have been selected <nl> - update_toolbar_buttons : = > <nl> - if @ get_selected_namespaces ( ) . length < 1 <nl> - @ . $ ( ' . btn . remove - namespace ' ) . attr ' disabled ' , true <nl> - else <nl> - @ . $ ( ' . btn . remove - namespace ' ) . removeAttr ' disabled ' <nl> - # @ . $ ( ' . btn . remove - namespace ' ) . is ' : disabled ' , @ get_selected_namespaces ( ) . length < 1 <nl> - <nl> - destroy : = > <nl> - super <nl> - datacenters . off ' all ' , @ update_button_create_namespace <nl> - databases . off ' all ' , @ update_button_create_namespace <nl> - @ add_database_dialog . destroy ( ) <nl> - @ add_namespace_dialog . destroy ( ) <nl> - @ remove_namespace_dialog . destroy ( ) <nl> - <nl> - class @ DatabaseListElement extends UIComponents . CollapsibleListElement <nl> - template : Handlebars . templates [ ' database_list_element - template ' ] <nl> - summary_template : Handlebars . templates [ ' database_list_element - summary - template ' ] <nl> - <nl> - className : ' element - container ' <nl> - <nl> - events : - > <nl> - _ . extend super , <nl> - ' click button . remove - database ' : ' remove_database ' <nl> - <nl> - initialize : - > <nl> - super <nl> - <nl> - @ delegateEvents ( ) <nl> - <nl> - @ namespace_list = new NamespaceView . NamespaceList @ model . get ( ' id ' ) <nl> - @ callbacks = [ ] <nl> - @ no_namespace = true <nl> - <nl> - @ model . on ' change ' , @ render_summary <nl> - @ namespace_list . on ' size_changed ' , @ render <nl> - <nl> - render : = > <nl> - @ . $ el . html @ template ( { } ) <nl> - <nl> - @ render_summary ( ) <nl> - <nl> - # Attach a list of available machines to the given datacenter <nl> - @ . $ ( ' . element - list - container ' ) . html @ namespace_list . render ( ) . $ el <nl> - <nl> - super <nl> - <nl> - return @ <nl> - <nl> - render_summary : = > <nl> - json = @ model . toJSON ( ) <nl> - <nl> - @ . $ ( ' . summary ' ) . html @ summary_template json <nl> - <nl> - remove_database : ( event ) = > <nl> - event . preventDefault ( ) <nl> - <nl> - db = databases . get @ . $ ( event . target ) . 
data ( ' id ' ) <nl> - if db ? <nl> - remove_database_dialog = new DatabaseView . RemoveDatabaseModal <nl> - remove_database_dialog . render db <nl> - <nl> - register_namespace_callback : ( callbacks ) = > <nl> - @ callbacks = callbacks <nl> - @ namespace_list . register_namespace_callbacks @ callbacks <nl> - <nl> - rename_datacenter : ( event ) - > <nl> - event . preventDefault ( ) <nl> - rename_modal = new UIComponents . RenameItemModal @ model . get ( ' id ' ) , ' datacenter ' <nl> - rename_modal . render ( ) <nl> - <nl> - destroy : = > <nl> - @ model . off ' change ' , @ render_summary <nl> - @ namespace_list . off ' size_changed ' , @ nl_size_changed <nl> - @ namespace_list . destroy ( ) <nl> - <nl> - # Show a list of namespaces <nl> - class @ NamespaceList extends UIComponents . AbstractList <nl> - # Use a namespace - specific template for the namespace list <nl> - tagName : ' div ' <nl> - template : Handlebars . templates [ ' namespace_list - template ' ] <nl> - <nl> - initialize : ( database_id ) = > <nl> - log_initial ' ( initializing ) namespace list view ' <nl> - <nl> - super namespaces , NamespaceView . NamespaceListElement , ' . list ' , <nl> - { <nl> - filter : ( model ) - > model . get ( ' database ' ) is database_id , <nl> - sort : ( a , b ) - > <nl> - if b . model . get ( ' name ' ) < a . model . get ( ' name ' ) <nl> - return 1 <nl> - else if b . model . get ( ' name ' ) > a . model . get ( ' name ' ) <nl> - return - 1 <nl> - return 0 <nl> - } <nl> - , ' table ' , ' database ' <nl> - <nl> - # Extend the AbstractList . add_element method to bind a callback to each namespace added to the list <nl> - add_element : ( element ) = > <nl> - namespace_list_element = super element <nl> - @ bind_callbacks_to_namespace namespace_list_element <nl> - <nl> - add_namespace : ( event ) = > <nl> - event . preventDefault ( ) <nl> - @ add_namespace_dialog . render ( ) <nl> - $ ( ' # focus_namespace_name ' ) . focus ( ) <nl> - <nl> - remove_namespace : ( event ) = > <nl> - log_action ' remove namespace button clicked ' <nl> - # Make sure the button isn ' t disabled , and pass the list of namespace UUIDs selected <nl> - if not $ ( event . currentTarget ) . is ' : disabled ' <nl> - @ remove_namespace_dialog . render @ get_selected_elements ( ) <nl> - event . preventDefault ( ) <nl> - <nl> - register_namespace_callbacks : ( callbacks ) = > <nl> - @ callbacks = callbacks <nl> - @ bind_callbacks_to_namespace namespace_list_element for namespace_list_element in @ element_views <nl> - <nl> - bind_callbacks_to_namespace : ( namespace_list_element ) = > <nl> - namespace_list_element . off ' selected ' <nl> - namespace_list_element . on ' selected ' , = > callback ( ) for callback in @ callbacks <nl> - <nl> - destroy : = > <nl> - super ( ) <nl> - <nl> - # Namespace list element <nl> - class @ NamespaceListElement extends UIComponents . CheckboxListElement <nl> - template : Handlebars . templates [ ' namespace_list_element - template ' ] <nl> - tagName : ' div ' <nl> - <nl> - hide_popover : - > <nl> - $ ( ' . tooltip ' ) . remove ( ) <nl> - <nl> - initialize : - > <nl> - log_initial ' ( initializing ) list view : namespace ' <nl> - super @ template <nl> - directory . on ' all ' , @ render <nl> - <nl> - json_for_template : = > <nl> - json = _ . extend super ( ) , DataUtils . get_namespace_status ( @ model . get ( ' id ' ) ) <nl> - return json <nl> - <nl> - render : = > <nl> - super <nl> - return @ <nl> - <nl> - destroy : = > <nl> - directory . 
off ' all ' , @ render <nl> - super <nl> - <nl> - class @ AddDatabaseModal extends UIComponents . AbstractModal <nl> - template : Handlebars . templates [ ' add_database - modal - template ' ] <nl> - alert_tmpl : Handlebars . templates [ ' added_database - alert - template ' ] <nl> - error_template : Handlebars . templates [ ' error_input - template ' ] <nl> - <nl> - class : ' add - database ' <nl> - <nl> - initialize : - > <nl> - log_initial ' ( initializing ) modal dialog : add datacenter ' <nl> - super <nl> - <nl> - render : - > <nl> - log_render ' ( rendering ) add datatabase dialog ' <nl> - super <nl> - modal_title : " Add database " <nl> - btn_primary_text : " Add " <nl> - @ . $ ( ' . focus_new_name ' ) . focus ( ) <nl> - <nl> - on_submit : - > <nl> - super <nl> - @ formdata = form_data_as_object ( $ ( ' form ' , @ $ modal ) ) <nl> - <nl> - no_error = true <nl> - if @ formdata . name is ' ' <nl> - no_error = false <nl> - $ ( ' . alert_modal ' ) . html @ error_template <nl> - database_is_empty : true <nl> - else if / ^ [ a - zA - Z0 - 9_ ] + $ / . test ( @ formdata . name ) is false <nl> - no_error = false <nl> - $ ( ' . alert_modal ' ) . html @ error_template <nl> - special_char_detected : true <nl> - type : ' database ' <nl> - else <nl> - for database in databases . models <nl> - if database . get ( ' name ' ) is @ formdata . name <nl> - no_error = false <nl> - $ ( ' . alert_modal ' ) . html @ error_template <nl> - database_exists : true <nl> - break <nl> - if no_error is true <nl> - $ . ajax <nl> - processData : false <nl> - url : ' ajax / semilattice / databases / new ' <nl> - type : ' POST ' <nl> - contentType : ' application / json ' <nl> - data : JSON . stringify ( { " name " : @ formdata . name } ) <nl> - success : @ on_success <nl> - error : @ on_error <nl> - else <nl> - $ ( ' . alert_modal_content ' ) . slideDown ' fast ' <nl> - @ reset_buttons ( ) <nl> - <nl> - on_success : ( response ) = > <nl> - super <nl> - apply_to_collection ( databases , response ) <nl> - <nl> - # Notify the user <nl> - for id , namespace of response <nl> - $ ( ' # user - alert - space ' ) . append @ alert_tmpl <nl> - name : namespace . name <nl> - id : id <nl> - <nl> - # A modal for adding namespaces <nl> - class @ AddNamespaceModal extends UIComponents . AbstractModal <nl> - template : Handlebars . templates [ ' add_namespace - modal - template ' ] <nl> - alert_tmpl : Handlebars . templates [ ' added_namespace - alert - template ' ] <nl> - need_database_alert_template : Handlebars . templates [ ' need_database - alert - template ' ] <nl> - error_template : Handlebars . templates [ ' error_input - template ' ] <nl> - class : ' add - namespace ' <nl> - <nl> - initialize : = > <nl> - log_initial ' ( initializing ) modal dialog : add namespace ' <nl> - super <nl> - <nl> - databases . on ' add ' , @ check_if_can_create_table <nl> - databases . on ' remove ' , @ check_if_can_create_table <nl> - databases . on ' reset ' , @ check_if_can_create_table <nl> - @ can_create_table_status = true <nl> - @ delegateEvents ( ) <nl> - <nl> - show_advanced_settings : ( event ) = > <nl> - event . preventDefault ( ) <nl> - that = @ <nl> - @ . $ ( ' . show_advanced_settings - link_container ' ) . fadeOut ' fast ' , - > <nl> - that . $ ( ' . hide_advanced_settings - link_container ' ) . fadeIn ' fast ' <nl> - @ . $ ( ' . advanced_settings ' ) . slideDown ' fast ' <nl> - <nl> - hide_advanced_settings : ( event ) = > <nl> - event . preventDefault ( ) <nl> - that = @ <nl> - @ . $ ( ' . 
hide_advanced_settings - link_container ' ) . fadeOut ' fast ' , - > <nl> - that . $ ( ' . show_advanced_settings - link_container ' ) . fadeIn ' fast ' <nl> - @ . $ ( ' . advanced_settings ' ) . slideUp ' fast ' <nl> - <nl> - # Check if we have a database ( if not , we cannot create a table ) <nl> - check_if_can_create_table : = > <nl> - if databases . length is 0 <nl> - if @ can_create_table_status <nl> - @ . $ ( ' . btn - primary ' ) . prop ' disabled ' , true <nl> - @ . $ ( ' . alert_modal ' ) . html ' You need to create a database before creating a table . ' <nl> - else <nl> - if @ can_create_table_status is false <nl> - @ . $ ( ' . alert_modal ' ) . empty ( ) <nl> - @ . $ ( ' . btn - primary ' ) . prop ' disabled ' , false <nl> - <nl> - <nl> - render : - > <nl> - log_render ' ( rendering ) add namespace dialog ' <nl> - <nl> - for datacenter in datacenters . models <nl> - datacenter . set ' num_machines ' , 0 <nl> - <nl> - for machine in machines . models <nl> - if machine . get ( ' datacenter_uuid ' ) isnt universe_datacenter . get ( ' id ' ) <nl> - datacenters . get ( machine . get ( ' datacenter_uuid ' ) ) . set ' num_machines ' , datacenter . get ( ' num_machines ' ) + 1 <nl> - <nl> - ordered_datacenters = _ . map ( datacenters . models , ( datacenter ) - > <nl> - id : datacenter . get ( ' id ' ) <nl> - name : datacenter . get ( ' name ' ) <nl> - num_machines : datacenter . get ( ' num_machines ' ) <nl> - ) <nl> - ordered_datacenters = ordered_datacenters . sort ( a , b ) - > <nl> - return b . num_machines - a . num_machines <nl> - <nl> - slice_index = 0 <nl> - for datacenter in ordered_datacenters <nl> - if datacenter . num_machines is 0 <nl> - break <nl> - slice_index + + <nl> - <nl> - ordered_datacenters = ordered_datacenters . slice 0 , slice_index <nl> - ordered_datacenters . unshift <nl> - id : universe_datacenter . get ( ' id ' ) <nl> - name : universe_datacenter . get ( ' name ' ) <nl> - <nl> - ordered_databases = databases . map ( d ) - > <nl> - id : d . get ( ' id ' ) <nl> - name : d . get ( ' name ' ) <nl> - <nl> - ordered_databases = _ . sortBy ordered_databases , ( d ) - > d . name <nl> - <nl> - super <nl> - modal_title : ' Add table ' <nl> - btn_primary_text : ' Add ' <nl> - datacenters : ordered_datacenters <nl> - all_datacenters : datacenters . length is ordered_datacenters . length <nl> - databases : ordered_databases <nl> - <nl> - @ check_if_can_create_table ( ) <nl> - @ . $ ( ' . show_advanced_settings - link ' ) . click @ show_advanced_settings <nl> - @ . $ ( ' . hide_advanced_settings - link ' ) . click @ hide_advanced_settings <nl> - <nl> - on_submit : = > <nl> - super <nl> - <nl> - formdata = form_data_as_object ( $ ( ' form ' , @ $ modal ) ) <nl> - # Check if data is safe <nl> - template_error = { } <nl> - input_error = false <nl> - <nl> - # Need a name <nl> - if formdata . name is ' ' <nl> - input_error = true <nl> - template_error . namespace_is_empty = true <nl> - else if / ^ [ a - zA - Z0 - 9_ ] + $ / . test ( formdata . name ) is false <nl> - input_error = true <nl> - template_error . special_char_detected = true <nl> - template_error . type = ' table ' <nl> - else # And a name that doesn ' t exist <nl> - for namespace in namespaces . models <nl> - if namespace . get ( ' name ' ) is formdata . name and namespace . get ( ' database ' ) is formdata . database <nl> - input_error = true <nl> - template_error . namespace_exists = true <nl> - break <nl> - # Need a database <nl> - if not formdata . database ? or formdata . 
database is ' ' <nl> - input_error = true <nl> - template_error . no_database = true <nl> - <nl> - if input_error is true <nl> - $ ( ' . alert_modal ' ) . html @ error_template template_error <nl> - $ ( ' . alert_modal_content ' ) . slideDown ' fast ' <nl> - @ reset_buttons ( ) <nl> - else <nl> - ack = { } <nl> - ack [ universe_datacenter . get ( ' id ' ) ] = <nl> - expectation : 1 <nl> - hard_durability : if formdata . write_disk is ' yes ' then true else false <nl> - <nl> - $ . ajax <nl> - processData : false <nl> - url : ' ajax / semilattice / rdb_namespaces / new ' <nl> - type : ' POST ' <nl> - contentType : ' application / json ' <nl> - data : JSON . stringify ( <nl> - name : formdata . name <nl> - primary_uuid : universe_datacenter . get ( ' id ' ) <nl> - database : formdata . database <nl> - ack_expectations : ack <nl> - primary_key : ( formdata . primary_key if formdata . primary_key isnt ' ' ) <nl> - ) <nl> - success : @ on_success <nl> - error : @ on_error <nl> - <nl> - on_success : ( response ) = > <nl> - super <nl> - <nl> - apply_to_collection ( namespaces , add_protocol_tag ( response , " rdb " ) ) <nl> - for id , namespace of response <nl> - $ ( ' # user - alert - space ' ) . append @ alert_tmpl <nl> - uuid : id <nl> - name : namespace . name <nl> - <nl> - <nl> - # A modal for removing namespaces <nl> - <nl> mmm a / admin / static / coffee / tables / table . coffee <nl> ppp b / admin / static / coffee / tables / table . coffee <nl> module ' TableView ' , - > <nl> # Delete operation <nl> delete_namespace : ( event ) - > <nl> event . preventDefault ( ) <nl> - remove_namespace_dialog = new TableView . RemoveNamespaceModal <nl> - namespace_to_delete = @ model <nl> + if @ remove_namespace_dialog <nl> + @ remove_namespace_dialog . remove ( ) <nl> + @ remove_namespace_dialog = new Modals . RemoveNamespaceModal <nl> <nl> - remove_namespace_dialog . on_success = ( response ) = > <nl> - window . router . navigate ' # tables ' <nl> - window . app . index_namespaces <nl> - alert_message : " The table # { @ model . get ( ' name ' ) } was successfully deleted . " <nl> - namespaces . remove @ model . get ' id ' <nl> + @ remove_namespace_dialog . render [ { <nl> + table : @ model . get ' name ' <nl> + database : @ model . get ' db ' <nl> + } ] <nl> <nl> - remove_namespace_dialog . render [ @ model ] <nl> <nl> destroy : = > <nl> clearInterval @ interval <nl> <nl> - @ title . destroy ( ) <nl> - @ profile . destroy ( ) <nl> - @ replicas . destroy ( ) <nl> - @ shards . destroy ( ) <nl> - @ server_assignments . destroy ( ) <nl> - @ performance_graph . destroy ( ) <nl> - @ secondary_indexes_view . destroy ( ) <nl> + @ title . remove ( ) <nl> + @ profile . remove ( ) <nl> + @ replicas . remove ( ) <nl> + @ shards . remove ( ) <nl> + @ server_assignments . remove ( ) <nl> + @ performance_graph . remove ( ) <nl> + @ secondary_indexes_view . remove ( ) <nl> + <nl> + if @ remove_namespace_dialog <nl> + @ remove_namespace_dialog . remove ( ) <nl> <nl> # TableView . Title <nl> class @ Title extends Backbone . View <nl> mmm a / admin / static / handlebars / modals . html <nl> ppp b / admin / static / handlebars / modals . html <nl> <nl> < label for = " primary_datacenter " > Select a database < / label > <nl> < select class = " database " name = " database " > <nl> { { # each databases } } <nl> - < option value = " { { this . id } } " > { { this . 
db } } < / option > <nl> + < option value = " { { name } } " > { { name } } < / option > <nl> { { / each } } <nl> < / select > <nl> <nl> <nl> < script id = " remove_namespace - modal - template " type = " text / x - handlebars - template " > <nl> < div class = " alert alert - error displayed_alert " > Deleting a table will delete all its data . This action cannot be reversed . < / div > <nl> < div class = " alert alert - error error_answer " > < / div > <nl> - < h4 > Are you sure you want to delete these tables : < / h4 > <nl> + { { # if single_delete } } <nl> + < h4 > Are you sure you want to delete this table : < / h4 > <nl> + { { else } } <nl> + < h4 > Are you sure you want to delete these tables : < / h4 > <nl> + { { / if } } <nl> < ul class = " namespace - list " > <nl> - { { # each namespaces } } <nl> + { { # each tables } } <nl> < li > < a href = " # tables / TODO " > { { database } } . { { table } } < / a > < / li > <nl> { { / each } } <nl> < / ul > <nl> mmm a / admin / static / handlebars / tables / index . html <nl> ppp b / admin / static / handlebars / tables / index . html <nl> < h1 class = " title " > Tables in the cluster < / h1 > <nl> < script id = " database - template " type = " text / x - handlebars - template " > <nl> < div class = " header_container " > <nl> < div class = " element - type " > Database < / div > <nl> - < h3 class = " name " > < a href = " # databases / { { id } } " > { { db } } < / a > < / h3 > <nl> + < h3 class = " name " > < a href = " # databases / { { id } } " > { { name } } < / a > < / h3 > <nl> < div class = " buttons " > <nl> < button class = " btn remove - database " data - id = { { id } } > Delete Database < / a > <nl> < / div > <nl>
Fix following the use of db_config
rethinkdb/rethinkdb
9070458057f57bb99b5052a7c74b32f4b6c2eb87
2014-09-09T08:50:13Z
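The dashboard now reads its database and table listings from RethinkDB's db_config system table instead of the old semilattice AJAX endpoints. The sketch below is a rough Python-driver equivalent of that ReQL query; it assumes the classic import rethinkdb as r entry point and the shipped system-table fields (name, id), which the transitional uuid field in the diff predates.

# Rough Python-driver equivalent of the dashboard's db_config query above.
import rethinkdb as r

conn = r.connect("localhost", 28015)
databases = (r.db("rethinkdb")
             .table("db_config")
             .filter(r.row["name"] != "rethinkdb")  # hide the system database
             .order_by("name")
             .run(conn))
for db in databases:
    print(db["name"], db["id"])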
mmm a / addons / skin . confluence / 720p / SmartPlaylistEditor . xml <nl> ppp b / addons / skin . confluence / 720p / SmartPlaylistEditor . xml <nl> <nl> < coordinates > <nl> < system > 1 < / system > <nl> < posx > 240 < / posx > <nl> - < posy > 45 < / posy > <nl> + < posy > 22 < / posy > <nl> < / coordinates > <nl> < include > dialogeffect < / include > <nl> < controls > <nl> <nl> < posx > 0 < / posx > <nl> < posy > 0 < / posy > <nl> < width > 800 < / width > <nl> - < height > 630 < / height > <nl> + < height > 675 < / height > <nl> < texture border = " 40 " > DialogBack . png < / texture > <nl> < / control > <nl> < control type = " image " > <nl> <nl> < onright > 19 < / onright > <nl> < onleft > 19 < / onleft > <nl> < onup > 17 < / onup > <nl> - < ondown > 9001 < / ondown > <nl> + < ondown > 23 < / ondown > <nl> < / control > <nl> <nl> < control type = " togglebutton " id = " 19 " > <nl> <nl> < onright > 18 < / onright > <nl> < onleft > 18 < / onleft > <nl> < onup > 17 < / onup > <nl> + < ondown > 24 < / ondown > <nl> + < / control > <nl> + <nl> + < control type = " spincontrolex " id = " 23 " > <nl> + < posx > 30 < / posx > <nl> + < posy > 530 < / posy > <nl> + < width > 550 < / width > <nl> + < height > 40 < / height > <nl> + < label > 21458 < / label > <nl> + < font > font13 < / font > <nl> + < texturefocus border = " 5 " > button - focus2 . png < / texturefocus > <nl> + < texturenofocus border = " 5 " > button - nofocus . png < / texturenofocus > <nl> + < onright > 24 < / onright > <nl> + < onleft > 24 < / onleft > <nl> + < onup > 18 < / onup > <nl> + < ondown > 9001 < / ondown > <nl> + < / control > <nl> + <nl> + < control type = " radiobutton " id = " 24 " > <nl> + < posx > 590 < / posx > <nl> + < posy > 530 < / posy > <nl> + < width > 180 < / width > <nl> + < height > 40 < / height > <nl> + < font > font12_title < / font > <nl> + < align > center < / align > <nl> + < aligny > center < / aligny > <nl> + < texturenofocus border = " 5 " > button - nofocus . png < / texturenofocus > <nl> + < texturefocus border = " 5 " > button - focus . png < / texturefocus > <nl> + < label > 21459 < / label > <nl> + < onright > 23 < / onright > <nl> + < onleft > 23 < / onleft > <nl> + < onup > 19 < / onup > <nl> < ondown > 9001 < / ondown > <nl> < / control > <nl> + <nl> < control type = " group " id = " 9001 " > <nl> < control type = " button " id = " 20 " > <nl> < description > Ok Button < / description > <nl> < posx > 195 < / posx > <nl> - < posy > 560 < / posy > <nl> + < posy > 605 < / posy > <nl> < width > 200 < / width > <nl> < height > 40 < / height > <nl> < align > center < / align > <nl> < aligny > center < / aligny > <nl> < label > 186 < / label > <nl> < font > font12_title < / font > <nl> - < onup > 18 < / onup > <nl> + < onup > 23 < / onup > <nl> < onleft > 21 < / onleft > <nl> < onright > 21 < / onright > <nl> < ondown > 22 < / ondown > <nl> <nl> < control type = " button " id = " 21 " > <nl> < description > Cancel Button < / description > <nl> < posx > 405 < / posx > <nl> - < posy > 560 < / posy > <nl> + < posy > 605 < / posy > <nl> < width > 200 < / width > <nl> < height > 40 < / height > <nl> < align > center < / align > <nl> < aligny > center < / aligny > <nl> < label > 222 < / label > <nl> < font > font12_title < / font > <nl> - < onup > 18 < / onup > <nl> + < onup > 23 < / onup > <nl> < onleft > 20 < / onleft > <nl> < onright > 20 < / onright > <nl> < ondown > 22 < / ondown > <nl>
[ confluence ] add controls for grouping in SmartPlaylistEditor . xml
xbmc/xbmc
ec5c24b0c6ad83c529967d8df2f6a833d7f145bf
2013-04-08T20:54:53Z
mmm a / src / python / grpcio_tests / tests / unit / _api_test . py <nl> ppp b / src / python / grpcio_tests / tests / unit / _api_test . py <nl> def testAll ( self ) : <nl> ' ServiceRpcHandler ' , <nl> ' Server ' , <nl> ' ServerInterceptor ' , <nl> + ' LocalConnectionType ' , <nl> + ' local_channel_credentials ' , <nl> + ' local_server_credentials ' , <nl> ' unary_unary_rpc_method_handler ' , <nl> ' unary_stream_rpc_method_handler ' , <nl> ' stream_unary_rpc_method_handler ' , <nl>
Make _api_test . py happy
grpc/grpc
64dd53273252db24662b93d4e3408c58bfc000f4
2019-08-16T21:55:48Z
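The three names added to the expected-API surface are gRPC Python's local credentials, at the time an experimental feature for loopback TCP or Unix-socket connections that want a credentials object without TLS. A minimal usage sketch follows; the address and timeout are arbitrary choices for illustration:

# Connect a client and server over local (non-TLS) credentials.
from concurrent import futures
import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
port = server.add_secure_port(
    "localhost:0",
    grpc.local_server_credentials(grpc.LocalConnectionType.LOCAL_TCP))
server.start()

channel = grpc.secure_channel(
    "localhost:%d" % port,
    grpc.local_channel_credentials(grpc.LocalConnectionType.LOCAL_TCP))
grpc.channel_ready_future(channel).result(timeout=5)
print("local channel established")
server.stop(None)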
mmm a / libraries / chain / controller . cpp <nl> ppp b / libraries / chain / controller . cpp <nl> struct controller_impl { <nl> trace - > receipt = push_receipt ( gtrx . trx_id , transaction_receipt : : expired , billed_cpu_time_us , 0 ) ; / / expire the transaction <nl> trace - > account_ram_delta = account_delta ( gtrx . payer , trx_removal_ram_delta ) ; <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , dtrx ) ) ; <nl> undo_session . squash ( ) ; <nl> return trace ; <nl> } <nl> struct controller_impl { <nl> trace - > account_ram_delta = account_delta ( gtrx . payer , trx_removal_ram_delta ) ; <nl> <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , dtrx ) ) ; <nl> <nl> trx_context . squash ( ) ; <nl> undo_session . squash ( ) ; <nl> struct controller_impl { <nl> if ( ! trace - > except_ptr ) { <nl> trace - > account_ram_delta = account_delta ( gtrx . payer , trx_removal_ram_delta ) ; <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , dtrx ) ) ; <nl> undo_session . squash ( ) ; <nl> return trace ; <nl> } <nl> struct controller_impl { <nl> trace - > account_ram_delta = account_delta ( gtrx . payer , trx_removal_ram_delta ) ; <nl> <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , dtrx ) ) ; <nl> <nl> undo_session . squash ( ) ; <nl> } else { <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , dtrx ) ) ; <nl> } <nl> <nl> return trace ; <nl> struct controller_impl { <nl> emit ( self . accepted_transaction , trx ) ; <nl> } <nl> <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , trn ) ) ; <nl> <nl> <nl> if ( read_mode ! = db_read_mode : : SPECULATIVE & & pending - > _block_status = = controller : : block_status : : incomplete ) { <nl> struct controller_impl { <nl> } <nl> <nl> emit ( self . accepted_transaction , trx ) ; <nl> - emit ( self . applied_transaction , trace ) ; <nl> + emit ( self . applied_transaction , std : : tie ( trace , trn ) ) ; <nl> <nl> return trace ; <nl> } FC_CAPTURE_AND_RETHROW ( ( trace ) ) <nl> mmm a / libraries / chain / include / eosio / chain / controller . hpp <nl> ppp b / libraries / chain / include / eosio / chain / controller . hpp <nl> namespace eosio { namespace chain { <nl> signal < void ( const block_state_ptr & ) > accepted_block ; <nl> signal < void ( const block_state_ptr & ) > irreversible_block ; <nl> signal < void ( const transaction_metadata_ptr & ) > accepted_transaction ; <nl> - signal < void ( const transaction_trace_ptr & ) > applied_transaction ; <nl> + signal < void ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > ) > applied_transaction ; <nl> signal < void ( const int & ) > bad_alloc ; <nl> <nl> / * <nl> mmm a / plugins / chain_plugin / chain_plugin . cpp <nl> ppp b / plugins / chain_plugin / chain_plugin . 
cpp <nl> void chain_plugin : : plugin_initialize ( const variables_map & options ) { <nl> } ) ; <nl> <nl> my - > applied_transaction_connection = my - > chain - > applied_transaction . connect ( <nl> - [ this ] ( const transaction_trace_ptr & trace ) { <nl> - my - > applied_transaction_channel . publish ( priority : : low , trace ) ; <nl> + [ this ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > t ) { <nl> + my - > applied_transaction_channel . publish ( priority : : low , std : : get < 0 > ( t ) ) ; <nl> } ) ; <nl> <nl> my - > chain - > add_indices ( ) ; <nl> mmm a / plugins / history_plugin / history_plugin . cpp <nl> ppp b / plugins / history_plugin / history_plugin . cpp <nl> namespace eosio { <nl> db . add_index < public_key_history_multi_index > ( ) ; <nl> <nl> my - > applied_transaction_connection . emplace ( <nl> - chain . applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & p ) { <nl> - my - > on_applied_transaction ( p ) ; <nl> + chain . applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > t ) { <nl> + my - > on_applied_transaction ( std : : get < 0 > ( t ) ) ; <nl> } ) ) ; <nl> } FC_LOG_AND_RETHROW ( ) <nl> } <nl> mmm a / plugins / mongo_db_plugin / mongo_db_plugin . cpp <nl> ppp b / plugins / mongo_db_plugin / mongo_db_plugin . cpp <nl> void mongo_db_plugin : : plugin_initialize ( const variables_map & options ) <nl> my - > accepted_transaction ( t ) ; <nl> } ) ) ; <nl> my - > applied_transaction_connection . emplace ( <nl> - chain . applied_transaction . connect ( [ & ] ( const chain : : transaction_trace_ptr & t ) { <nl> - my - > applied_transaction ( t ) ; <nl> + chain . applied_transaction . connect ( [ & ] ( std : : tuple < const chain : : transaction_trace_ptr & , const chain : : signed_transaction & > t ) { <nl> + my - > applied_transaction ( std : : get < 0 > ( t ) ) ; <nl> } ) ) ; <nl> <nl> if ( my - > wipe_database_on_startup ) { <nl> mmm a / plugins / state_history_plugin / include / eosio / state_history_plugin / state_history_plugin . hpp <nl> ppp b / plugins / state_history_plugin / include / eosio / state_history_plugin / state_history_plugin . hpp <nl> using std : : shared_ptr ; <nl> <nl> typedef shared_ptr < struct state_history_plugin_impl > state_history_ptr ; <nl> <nl> + struct partial_transaction { <nl> + chain : : time_point_sec expiration = { } ; <nl> + uint16_t ref_block_num = { } ; <nl> + uint32_t ref_block_prefix = { } ; <nl> + fc : : unsigned_int max_net_usage_words = { } ; <nl> + uint8_t max_cpu_usage_ms = { } ; <nl> + fc : : unsigned_int delay_sec = { } ; <nl> + chain : : extensions_type transaction_extensions = { } ; <nl> + vector < chain : : signature_type > signatures = { } ; <nl> + vector < bytes > context_free_data = { } ; <nl> + <nl> + partial_transaction ( const chain : : signed_transaction & t ) <nl> + : expiration ( t . expiration ) <nl> + , ref_block_num ( t . ref_block_num ) <nl> + , ref_block_prefix ( t . ref_block_prefix ) <nl> + , max_net_usage_words ( t . max_net_usage_words ) <nl> + , max_cpu_usage_ms ( t . max_cpu_usage_ms ) <nl> + , delay_sec ( t . delay_sec ) <nl> + , transaction_extensions ( t . transaction_extensions ) <nl> + , signatures ( t . signatures ) <nl> + , context_free_data ( t . 
context_free_data ) { } <nl> + } ; <nl> + <nl> + struct augmented_transaction_trace { <nl> + chain : : transaction_trace_ptr trace ; <nl> + std : : shared_ptr < partial_transaction > partial ; <nl> + <nl> + augmented_transaction_trace ( ) = default ; <nl> + augmented_transaction_trace ( const augmented_transaction_trace & ) = default ; <nl> + augmented_transaction_trace ( augmented_transaction_trace & & ) = default ; <nl> + <nl> + augmented_transaction_trace ( const chain : : transaction_trace_ptr & trace ) <nl> + : trace { trace } { } <nl> + <nl> + augmented_transaction_trace ( const chain : : transaction_trace_ptr & trace , <nl> + const std : : shared_ptr < partial_transaction > & partial ) <nl> + : trace { trace } <nl> + , partial { partial } { } <nl> + <nl> + augmented_transaction_trace ( const chain : : transaction_trace_ptr & trace , const chain : : signed_transaction & t ) <nl> + : trace { trace } <nl> + , partial { std : : make_shared < partial_transaction > ( t ) } { } <nl> + <nl> + augmented_transaction_trace & operator = ( const augmented_transaction_trace & ) = default ; <nl> + augmented_transaction_trace & operator = ( augmented_transaction_trace & & ) = default ; <nl> + } ; <nl> + <nl> struct table_delta { <nl> fc : : unsigned_int struct_version = 0 ; <nl> std : : string name { } ; <nl> mmm a / plugins / state_history_plugin / include / eosio / state_history_plugin / state_history_serialization . hpp <nl> ppp b / plugins / state_history_plugin / include / eosio / state_history_plugin / state_history_serialization . hpp <nl> datastream < ST > & operator < < ( datastream < ST > & ds , const history_serial_wrapper < eosi <nl> } <nl> <nl> template < typename ST > <nl> - datastream < ST > & operator < < ( datastream < ST > & ds , <nl> - const history_context_wrapper < uint8_t , eosio : : chain : : transaction_trace > & obj ) { <nl> + datastream < ST > & operator < < ( datastream < ST > & ds , <nl> + const history_context_wrapper < uint8_t , eosio : : augmented_transaction_trace > & obj ) { <nl> + auto & trace = * obj . obj . trace ; <nl> fc : : raw : : pack ( ds , fc : : unsigned_int ( 0 ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < eosio : : chain : : transaction_id_type > ( obj . obj . id ) ) ; <nl> - if ( obj . obj . receipt ) { <nl> - if ( obj . obj . failed_dtrx_trace & & <nl> - obj . obj . receipt - > status . value = = eosio : : chain : : transaction_receipt_header : : soft_fail ) <nl> + fc : : raw : : pack ( ds , as_type < eosio : : chain : : transaction_id_type > ( trace . id ) ) ; <nl> + if ( trace . receipt ) { <nl> + if ( trace . failed_dtrx_trace & & trace . receipt - > status . value = = eosio : : chain : : transaction_receipt_header : : soft_fail ) <nl> fc : : raw : : pack ( ds , uint8_t ( eosio : : chain : : transaction_receipt_header : : executed ) ) ; <nl> else <nl> - fc : : raw : : pack ( ds , as_type < uint8_t > ( obj . obj . receipt - > status . value ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < uint32_t > ( obj . obj . receipt - > cpu_usage_us ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < fc : : unsigned_int > ( obj . obj . receipt - > net_usage_words ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint8_t > ( trace . receipt - > status . value ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint32_t > ( trace . receipt - > cpu_usage_us ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < fc : : unsigned_int > ( trace . receipt - > net_usage_words ) ) ; <nl> } else { <nl> fc : : raw : : pack ( ds , uint8_t ( obj . 
context ) ) ; <nl> fc : : raw : : pack ( ds , uint32_t ( 0 ) ) ; <nl> fc : : raw : : pack ( ds , fc : : unsigned_int ( 0 ) ) ; <nl> } <nl> - fc : : raw : : pack ( ds , as_type < int64_t > ( obj . obj . elapsed . count ( ) ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < uint64_t > ( obj . obj . net_usage ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < bool > ( obj . obj . scheduled ) ) ; <nl> - history_serialize_container ( ds , obj . db , as_type < std : : vector < eosio : : chain : : action_trace > > ( obj . obj . action_traces ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < int64_t > ( trace . elapsed . count ( ) ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint64_t > ( trace . net_usage ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < bool > ( trace . scheduled ) ) ; <nl> + history_serialize_container ( ds , obj . db , as_type < std : : vector < eosio : : chain : : action_trace > > ( trace . action_traces ) ) ; <nl> <nl> - fc : : raw : : pack ( ds , bool ( obj . obj . account_ram_delta ) ) ; <nl> - if ( obj . obj . account_ram_delta ) { <nl> + fc : : raw : : pack ( ds , bool ( trace . account_ram_delta ) ) ; <nl> + if ( trace . account_ram_delta ) { <nl> fc : : raw : : pack ( <nl> - ds , make_history_serial_wrapper ( obj . db , as_type < eosio : : chain : : account_delta > ( * obj . obj . account_ram_delta ) ) ) ; <nl> + ds , make_history_serial_wrapper ( obj . db , as_type < eosio : : chain : : account_delta > ( * trace . account_ram_delta ) ) ) ; <nl> } <nl> <nl> fc : : optional < std : : string > e ; <nl> - if ( obj . obj . except ) <nl> - e = obj . obj . except - > to_string ( ) ; <nl> + if ( trace . except ) <nl> + e = trace . except - > to_string ( ) ; <nl> fc : : raw : : pack ( ds , as_type < fc : : optional < std : : string > > ( e ) ) ; <nl> - fc : : raw : : pack ( ds , as_type < fc : : optional < uint64_t > > ( obj . obj . error_code ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < fc : : optional < uint64_t > > ( trace . error_code ) ) ; <nl> <nl> - fc : : raw : : pack ( ds , bool ( obj . obj . failed_dtrx_trace ) ) ; <nl> - if ( obj . obj . failed_dtrx_trace ) { <nl> + fc : : raw : : pack ( ds , bool ( trace . failed_dtrx_trace ) ) ; <nl> + if ( trace . failed_dtrx_trace ) { <nl> uint8_t stat = eosio : : chain : : transaction_receipt_header : : hard_fail ; <nl> - if ( obj . obj . receipt & & obj . obj . receipt - > status . value = = eosio : : chain : : transaction_receipt_header : : soft_fail ) <nl> + if ( trace . receipt & & trace . receipt - > status . value = = eosio : : chain : : transaction_receipt_header : : soft_fail ) <nl> stat = eosio : : chain : : transaction_receipt_header : : soft_fail ; <nl> - fc : : raw : : pack ( ds , make_history_context_wrapper ( obj . db , stat , * obj . obj . failed_dtrx_trace ) ) ; <nl> + fc : : raw : : pack ( / / <nl> + ds , make_history_context_wrapper ( <nl> + obj . db , stat , eosio : : augmented_transaction_trace { trace . failed_dtrx_trace , obj . obj . partial } ) ) ; <nl> + } <nl> + <nl> + bool include_partial = obj . obj . partial & & ! trace . failed_dtrx_trace ; <nl> + fc : : raw : : pack ( ds , include_partial ) ; <nl> + if ( include_partial ) { <nl> + auto & partial = * obj . obj . partial ; <nl> + fc : : raw : : pack ( ds , fc : : unsigned_int ( 0 ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < eosio : : chain : : time_point_sec > ( partial . expiration ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint16_t > ( partial . 
ref_block_num ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint32_t > ( partial . ref_block_prefix ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < fc : : unsigned_int > ( partial . max_net_usage_words ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < uint8_t > ( partial . max_cpu_usage_ms ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < fc : : unsigned_int > ( partial . delay_sec ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < eosio : : chain : : extensions_type > ( partial . transaction_extensions ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < std : : vector < eosio : : chain : : signature_type > > ( partial . signatures ) ) ; <nl> + fc : : raw : : pack ( ds , as_type < std : : vector < eosio : : bytes > > ( partial . context_free_data ) ) ; <nl> } <nl> <nl> return ds ; <nl> } <nl> <nl> template < typename ST > <nl> - datastream < ST > & operator < < ( datastream < ST > & ds , const history_serial_wrapper < eosio : : chain : : transaction_trace > & obj ) { <nl> + datastream < ST > & operator < < ( datastream < ST > & ds , const history_serial_wrapper < eosio : : augmented_transaction_trace > & obj ) { <nl> uint8_t stat = eosio : : chain : : transaction_receipt_header : : hard_fail ; <nl> ds < < make_history_context_wrapper ( obj . db , stat , obj . obj ) ; <nl> return ds ; <nl> mmm a / plugins / state_history_plugin / state_history_plugin . cpp <nl> ppp b / plugins / state_history_plugin / state_history_plugin . cpp <nl> bool include_delta ( const eosio : : chain : : code_object & old , const eosio : : chain : : cod <nl> } <nl> <nl> struct state_history_plugin_impl : std : : enable_shared_from_this < state_history_plugin_impl > { <nl> - chain_plugin * chain_plug = nullptr ; <nl> - fc : : optional < state_history_log > trace_log ; <nl> - fc : : optional < state_history_log > chain_state_log ; <nl> - bool stopping = false ; <nl> - fc : : optional < scoped_connection > applied_transaction_connection ; <nl> - fc : : optional < scoped_connection > accepted_block_connection ; <nl> - string endpoint_address = " 0 . 0 . 0 . 0 " ; <nl> - uint16_t endpoint_port = 8080 ; <nl> - std : : unique_ptr < tcp : : acceptor > acceptor ; <nl> - std : : map < transaction_id_type , transaction_trace_ptr > cached_traces ; <nl> - transaction_trace_ptr onblock_trace ; <nl> + chain_plugin * chain_plug = nullptr ; <nl> + fc : : optional < state_history_log > trace_log ; <nl> + fc : : optional < state_history_log > chain_state_log ; <nl> + bool stopping = false ; <nl> + fc : : optional < scoped_connection > applied_transaction_connection ; <nl> + fc : : optional < scoped_connection > accepted_block_connection ; <nl> + string endpoint_address = " 0 . 0 . 0 . 0 " ; <nl> + uint16_t endpoint_port = 8080 ; <nl> + std : : unique_ptr < tcp : : acceptor > acceptor ; <nl> + std : : map < transaction_id_type , augmented_transaction_trace > cached_traces ; <nl> + fc : : optional < augmented_transaction_trace > onblock_trace ; <nl> <nl> void get_log_entry ( state_history_log & log , uint32_t block_num , fc : : optional < bytes > & result ) { <nl> if ( block_num < log . begin_block ( ) | | block_num > = log . end_block ( ) ) <nl> struct state_history_plugin_impl : std : : enable_shared_from_this < state_history_pl <nl> auth . 
permission = = eosio : : chain : : config : : active_name ; <nl> } <nl> <nl> - void on_applied_transaction ( const transaction_trace_ptr & p ) { <nl> + void on_applied_transaction ( const transaction_trace_ptr & p , const signed_transaction & t ) { <nl> if ( p - > receipt & & trace_log ) { <nl> if ( is_onblock ( p ) ) <nl> - onblock_trace = p ; <nl> + onblock_trace . emplace ( p , t ) ; <nl> else if ( p - > failed_dtrx_trace ) <nl> - cached_traces [ p - > failed_dtrx_trace - > id ] = p ; <nl> + cached_traces [ p - > failed_dtrx_trace - > id ] = augmented_transaction_trace { p , t } ; <nl> else <nl> - cached_traces [ p - > id ] = p ; <nl> + cached_traces [ p - > id ] = augmented_transaction_trace { p , t } ; <nl> } <nl> } <nl> <nl> struct state_history_plugin_impl : std : : enable_shared_from_this < state_history_pl <nl> void store_traces ( const block_state_ptr & block_state ) { <nl> if ( ! trace_log ) <nl> return ; <nl> - std : : vector < transaction_trace_ptr > traces ; <nl> + std : : vector < augmented_transaction_trace > traces ; <nl> if ( onblock_trace ) <nl> - traces . push_back ( onblock_trace ) ; <nl> + traces . push_back ( * onblock_trace ) ; <nl> for ( auto & r : block_state - > block - > transactions ) { <nl> transaction_id_type id ; <nl> if ( r . trx . contains < transaction_id_type > ( ) ) <nl> struct state_history_plugin_impl : std : : enable_shared_from_this < state_history_pl <nl> else <nl> id = r . trx . get < packed_transaction > ( ) . id ( ) ; <nl> auto it = cached_traces . find ( id ) ; <nl> - EOS_ASSERT ( it ! = cached_traces . end ( ) & & it - > second - > receipt , plugin_exception , <nl> + EOS_ASSERT ( it ! = cached_traces . end ( ) & & it - > second . trace - > receipt , plugin_exception , <nl> " missing trace for transaction $ { id } " , ( " id " , id ) ) ; <nl> traces . push_back ( it - > second ) ; <nl> } <nl> void state_history_plugin : : plugin_initialize ( const variables_map & options ) { <nl> EOS_ASSERT ( my - > chain_plug , chain : : missing_chain_plugin_exception , " " ) ; <nl> auto & chain = my - > chain_plug - > chain ( ) ; <nl> my - > applied_transaction_connection . emplace ( <nl> - chain . applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & p ) { my - > on_applied_transaction ( p ) ; } ) ) ; <nl> + chain . applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > t ) { <nl> + my - > on_applied_transaction ( std : : get < 0 > ( t ) , std : : get < 1 > ( t ) ) ; <nl> + } ) ) ; <nl> my - > accepted_block_connection . emplace ( <nl> chain . accepted_block . connect ( [ & ] ( const block_state_ptr & p ) { my - > on_accepted_block ( p ) ; } ) ) ; <nl> <nl> mmm a / plugins / state_history_plugin / state_history_plugin_abi . cpp <nl> ppp b / plugins / state_history_plugin / state_history_plugin_abi . cpp <nl> extern const char * const state_history_plugin_abi = R " ( { <nl> { " name " : " error_code " , " type " : " uint64 ? 
" } <nl> ] <nl> } , <nl> + { <nl> + " name " : " partial_transaction_v0 " , " fields " : [ <nl> + { " name " : " expiration " , " type " : " time_point_sec " } , <nl> + { " name " : " ref_block_num " , " type " : " uint16 " } , <nl> + { " name " : " ref_block_prefix " , " type " : " uint32 " } , <nl> + { " name " : " max_net_usage_words " , " type " : " varuint32 " } , <nl> + { " name " : " max_cpu_usage_ms " , " type " : " uint8 " } , <nl> + { " name " : " delay_sec " , " type " : " varuint32 " } , <nl> + { " name " : " transaction_extensions " , " type " : " extension [ ] " } , <nl> + { " name " : " signatures " , " type " : " signature [ ] " } , <nl> + { " name " : " context_free_data " , " type " : " bytes [ ] " } <nl> + ] <nl> + } , <nl> { <nl> " name " : " transaction_trace_v0 " , " fields " : [ <nl> { " name " : " id " , " type " : " checksum256 " } , <nl> extern const char * const state_history_plugin_abi = R " ( { <nl> { " name " : " account_ram_delta " , " type " : " account_delta ? " } , <nl> { " name " : " except " , " type " : " string ? " } , <nl> { " name " : " error_code " , " type " : " uint64 ? " } , <nl> - { " name " : " failed_dtrx_trace " , " type " : " transaction_trace ? " } <nl> + { " name " : " failed_dtrx_trace " , " type " : " transaction_trace ? " } , <nl> + { " name " : " partial " , " type " : " partial_transaction ? " } <nl> ] <nl> } , <nl> { <nl> extern const char * const state_history_plugin_abi = R " ( { <nl> <nl> { " name " : " action_receipt " , " types " : [ " action_receipt_v0 " ] } , <nl> { " name " : " action_trace " , " types " : [ " action_trace_v0 " ] } , <nl> + { " name " : " partial_transaction " , " types " : [ " partial_transaction_v0 " ] } , <nl> { " name " : " transaction_trace " , " types " : [ " transaction_trace_v0 " ] } , <nl> { " name " : " transaction_variant " , " types " : [ " transaction_id " , " packed_transaction " ] } , <nl> <nl> mmm a / unittests / api_tests . cpp <nl> ppp b / unittests / api_tests . cpp <nl> BOOST_FIXTURE_TEST_CASE ( transaction_tests , TESTER ) { try { <nl> { <nl> produce_blocks ( 10 ) ; <nl> transaction_trace_ptr trace ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { if ( t & & t - > receipt & & t - > receipt - > status ! = transaction_receipt : : executed ) { trace = t ; } } ) ; <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> + if ( t & & t - > receipt & & t - > receipt - > status ! = transaction_receipt : : executed ) { trace = t ; } <nl> + } ) ; <nl> <nl> / / test error handling on deferred transaction failure <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_transaction_trigger_error_handler " , { } ) ; <nl> BOOST_FIXTURE_TEST_CASE ( deferred_transaction_tests , TESTER ) { try { <nl> / / schedule <nl> { <nl> transaction_trace_ptr trace ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { if ( t - > scheduled ) { trace = t ; } } ) ; <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> + if ( t - > scheduled ) { trace = t ; } <nl> + } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction " , { } ) ; <nl> BOOST_CHECK ( ! 
trace ) ; <nl> produce_block ( fc : : seconds ( 2 ) ) ; <nl> BOOST_FIXTURE_TEST_CASE ( deferred_transaction_tests , TESTER ) { try { <nl> { <nl> transaction_trace_ptr trace ; <nl> uint32_t count = 0 ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { if ( t & & t - > scheduled ) { trace = t ; + + count ; } } ) ; <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> + if ( t & & t - > scheduled ) { trace = t ; + + count ; } <nl> + } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction " , { } ) ; <nl> BOOST_CHECK_THROW ( CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction " , { } ) , deferred_tx_duplicate ) ; <nl> produce_blocks ( 3 ) ; <nl> BOOST_FIXTURE_TEST_CASE ( deferred_transaction_tests , TESTER ) { try { <nl> { <nl> transaction_trace_ptr trace ; <nl> uint32_t count = 0 ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { if ( t & & t - > scheduled ) { trace = t ; + + count ; } } ) ; <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> + if ( t & & t - > scheduled ) { trace = t ; + + count ; } <nl> + } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction_replace " , { } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction_replace " , { } ) ; <nl> produce_blocks ( 3 ) ; <nl> BOOST_FIXTURE_TEST_CASE ( deferred_transaction_tests , TESTER ) { try { <nl> / / schedule and cancel <nl> { <nl> transaction_trace_ptr trace ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { if ( t & & t - > scheduled ) { trace = t ; } } ) ; <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> + if ( t & & t - > scheduled ) { trace = t ; } <nl> + } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " send_deferred_transaction " , { } ) ; <nl> CALL_TEST_FUNCTION ( * this , " test_transaction " , " cancel_deferred_transaction_success " , { } ) ; <nl> produce_block ( fc : : seconds ( 2 ) ) ; <nl> BOOST_FIXTURE_TEST_CASE ( deferred_transaction_tests , TESTER ) { try { <nl> / / repeated deferred transactions <nl> { <nl> vector < transaction_trace_ptr > traces ; <nl> - auto c = control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { <nl> + auto c = control - > applied_transaction . connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> if ( t & & t - > scheduled ) { <nl> traces . push_back ( t ) ; <nl> } <nl> mmm a / unittests / protocol_feature_tests . cpp <nl> ppp b / unittests / protocol_feature_tests . cpp <nl> BOOST_AUTO_TEST_CASE ( no_duplicate_deferred_id_test ) try { <nl> c2 . produce_empty_block ( fc : : minutes ( 10 ) ) ; <nl> <nl> transaction_trace_ptr trace0 ; <nl> - auto h = c2 . control - > applied_transaction . connect ( [ & ] ( const transaction_trace_ptr & t ) { <nl> + auto h = c2 . control - > applied_transaction . 
connect ( [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> if ( t & & t - > receipt & & t - > receipt - > status = = transaction_receipt : : expired ) { <nl> trace0 = t ; <nl> } <nl> mmm a / unittests / whitelist_blacklist_tests . cpp <nl> ppp b / unittests / whitelist_blacklist_tests . cpp <nl> BOOST_AUTO_TEST_CASE ( actor_blacklist_inline_deferred ) { try { <nl> tester2 . chain - > push_block ( b ) ; <nl> } <nl> <nl> - auto log_trxs = [ & ] ( const transaction_trace_ptr & t ) { <nl> + auto log_trxs = [ & ] ( std : : tuple < const transaction_trace_ptr & , const signed_transaction & > x ) { <nl> + auto & t = std : : get < 0 > ( x ) ; <nl> if ( ! t | | t - > action_traces . size ( ) = = 0 ) return ; <nl> <nl> const auto & act = t - > action_traces [ 0 ] . act ; <nl>
Merge pull request from EOSIO / ship - partial - transaction
EOSIO/eos
d891a76dd1fce81adfe69523197931ca6d2db9a6
2019-04-17T23:24:51Z
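The core of this change is widening the applied_transaction signal's payload from a bare trace pointer to a tuple that also carries the signed transaction, so every subscriber unpacks both halves with std::get. Below is a minimal self-contained sketch of that subscriber pattern; the struct types are hypothetical stand-ins, and a plain std::function slot list replaces the controller's actual signal machinery (believed to be boost::signals2):

#include <cstdio>
#include <functional>
#include <string>
#include <tuple>
#include <vector>

// Hypothetical stand-ins for the chain types the signal now carries together.
struct transaction_trace { std::string id; };
struct signed_transaction { std::string payload; };

using applied_transaction_slot =
    std::function<void(std::tuple<const transaction_trace&, const signed_transaction&>)>;

int main() {
  std::vector<applied_transaction_slot> slots;
  // Subscribers unpack with std::get, exactly as the plugin and tests now do.
  slots.push_back([](std::tuple<const transaction_trace&, const signed_transaction&> x) {
    auto& t = std::get<0>(x);
    auto& s = std::get<1>(x);
    std::printf("trace %s carries %zu payload bytes\n", t.id.c_str(), s.payload.size());
  });
  transaction_trace tr{"abc"};
  signed_transaction tx{"raw bytes"};
  std::tuple<const transaction_trace&, const signed_transaction&> args(tr, tx);
  for (auto& s : slots) s(args);
}

Bundling the two references in one tuple keeps every connect site a one-argument slot, which is why the plugin and test lambdas in this diff all take a single std::tuple parameter and immediately std::get<0> it.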
mmm a / src / codegen / x64 / macro - assembler - x64 . cc <nl> ppp b / src / codegen / x64 / macro - assembler - x64 . cc <nl> void TurboAssembler : : Cvtsd2ss ( XMMRegister dst , Operand src ) { <nl> void TurboAssembler : : Cvtlsi2sd ( XMMRegister dst , Register src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorpd ( dst , dst , dst ) ; <nl> - vcvtlsi2sd ( dst , dst , src ) ; <nl> + vcvtlsi2sd ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorpd ( dst , dst ) ; <nl> cvtlsi2sd ( dst , src ) ; <nl> void TurboAssembler : : Cvtlsi2sd ( XMMRegister dst , Register src ) { <nl> void TurboAssembler : : Cvtlsi2sd ( XMMRegister dst , Operand src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorpd ( dst , dst , dst ) ; <nl> - vcvtlsi2sd ( dst , dst , src ) ; <nl> + vcvtlsi2sd ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorpd ( dst , dst ) ; <nl> cvtlsi2sd ( dst , src ) ; <nl> void TurboAssembler : : Cvtlsi2sd ( XMMRegister dst , Operand src ) { <nl> void TurboAssembler : : Cvtlsi2ss ( XMMRegister dst , Register src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorps ( dst , dst , dst ) ; <nl> - vcvtlsi2ss ( dst , dst , src ) ; <nl> + vcvtlsi2ss ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorps ( dst , dst ) ; <nl> cvtlsi2ss ( dst , src ) ; <nl> void TurboAssembler : : Cvtlsi2ss ( XMMRegister dst , Register src ) { <nl> void TurboAssembler : : Cvtlsi2ss ( XMMRegister dst , Operand src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorps ( dst , dst , dst ) ; <nl> - vcvtlsi2ss ( dst , dst , src ) ; <nl> + vcvtlsi2ss ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorps ( dst , dst ) ; <nl> cvtlsi2ss ( dst , src ) ; <nl> void TurboAssembler : : Cvtlsi2ss ( XMMRegister dst , Operand src ) { <nl> void TurboAssembler : : Cvtqsi2ss ( XMMRegister dst , Register src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorps ( dst , dst , dst ) ; <nl> - vcvtqsi2ss ( dst , dst , src ) ; <nl> + vcvtqsi2ss ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorps ( dst , dst ) ; <nl> cvtqsi2ss ( dst , src ) ; <nl> void TurboAssembler : : Cvtqsi2ss ( XMMRegister dst , Register src ) { <nl> void TurboAssembler : : Cvtqsi2ss ( XMMRegister dst , Operand src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorps ( dst , dst , dst ) ; <nl> - vcvtqsi2ss ( dst , dst , src ) ; <nl> + vcvtqsi2ss ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorps ( dst , dst ) ; <nl> cvtqsi2ss ( dst , src ) ; <nl> void TurboAssembler : : Cvtqsi2ss ( XMMRegister dst , Operand src ) { <nl> void TurboAssembler : : Cvtqsi2sd ( XMMRegister dst , Register src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorpd ( dst , dst , dst ) ; <nl> - vcvtqsi2sd ( dst , dst , src ) ; <nl> + vcvtqsi2sd ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorpd ( dst , dst ) ; <nl> cvtqsi2sd ( dst , src ) ; <nl> void TurboAssembler : : Cvtqsi2sd ( XMMRegister dst , Register src ) { <nl> void TurboAssembler : : Cvtqsi2sd ( XMMRegister dst , Operand src ) { <nl> if ( CpuFeatures : : IsSupported ( AVX ) ) { <nl> CpuFeatureScope scope ( this , AVX ) ; <nl> - vxorpd ( dst , dst , dst ) ; <nl> - vcvtqsi2sd ( 
dst , dst , src ) ; <nl> + vcvtqsi2sd ( dst , kScratchDoubleReg , src ) ; <nl> } else { <nl> xorpd ( dst , dst ) ; <nl> cvtqsi2sd ( dst , src ) ; <nl> mmm a / src / codegen / x64 / macro - assembler - x64 . h <nl> ppp b / src / codegen / x64 / macro - assembler - x64 . h <nl> class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { <nl> void Cvttss2si ( Register dst , Operand src ) ; <nl> void Cvttss2siq ( Register dst , XMMRegister src ) ; <nl> void Cvttss2siq ( Register dst , Operand src ) ; <nl> - void Cvtqsi2ss ( XMMRegister dst , Register src ) ; <nl> - void Cvtqsi2ss ( XMMRegister dst , Operand src ) ; <nl> - void Cvtqsi2sd ( XMMRegister dst , Register src ) ; <nl> - void Cvtqsi2sd ( XMMRegister dst , Operand src ) ; <nl> - void Cvtlsi2ss ( XMMRegister dst , Register src ) ; <nl> - void Cvtlsi2ss ( XMMRegister dst , Operand src ) ; <nl> void Cvtlui2ss ( XMMRegister dst , Register src ) ; <nl> void Cvtlui2ss ( XMMRegister dst , Operand src ) ; <nl> void Cvtlui2sd ( XMMRegister dst , Register src ) ; <nl> class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { <nl> void Cvttss2uiq ( Register dst , Operand src , Label * fail = nullptr ) ; <nl> void Cvttss2uiq ( Register dst , XMMRegister src , Label * fail = nullptr ) ; <nl> <nl> - / / cvtsi2sd instruction only writes to the low 64 - bit of dst register , which <nl> - / / hinders register renaming and makes dependence chains longer . So we use <nl> - / / xorpd to clear the dst register before cvtsi2sd to solve this issue . <nl> + / / cvtsi2sd and cvtsi2ss instructions only write to the low 64 / 32 - bit of dst <nl> + / / register , which hinders register renaming and makes dependence chains <nl> + / / longer . So we use xorpd to clear the dst register before cvtsi2sd for <nl> + / / non - AVX and a scratch XMM register as first src for AVX to solve this <nl> + / / issue . <nl> + void Cvtqsi2ss ( XMMRegister dst , Register src ) ; <nl> + void Cvtqsi2ss ( XMMRegister dst , Operand src ) ; <nl> + void Cvtqsi2sd ( XMMRegister dst , Register src ) ; <nl> + void Cvtqsi2sd ( XMMRegister dst , Operand src ) ; <nl> + void Cvtlsi2ss ( XMMRegister dst , Register src ) ; <nl> + void Cvtlsi2ss ( XMMRegister dst , Operand src ) ; <nl> void Cvtlsi2sd ( XMMRegister dst , Register src ) ; <nl> void Cvtlsi2sd ( XMMRegister dst , Operand src ) ; <nl> <nl>
[ x64 ] Use scratch double register to break dependency for vcvtsi2sd and vcvtsi2ss
v8/v8
ca9f4dab66afa8a963d8996ecf300a86fbce16d6
2019-12-19T05:13:29Z
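The deleted vxorpd/vxorps pair and the new scratch-register first operand both target the hazard the updated header comment describes: cvtsi2sd/cvtsi2ss write only the low lanes of dst, so using dst itself as the merge source creates a false dependency on its previous value and lengthens dependence chains. A conceptual, runnable illustration with SSE2 intrinsics follows (x86-64 only; the compiler allocates registers here, so this shows the idea rather than the exact codegen):

#include <immintrin.h>
#include <cstdio>

// The convert merges into the upper lane of its destination, so seeding it
// from a register whose old value we don't care about (a zeroed stand-in for
// kScratchDoubleReg) removes the read-after-write dependency on dst.
int main() {
  long long x = 42;
  __m128d scratch = _mm_setzero_pd();      // like xorpd scratch, scratch
  __m128d d = _mm_cvtsi64_sd(scratch, x);  // dst never reads its own old value
  std::printf("%f\n", _mm_cvtsd_f64(d));   // prints 42.000000
}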
mmm a / Telegram / SourceFiles / ui / widgets / popup_menu . cpp <nl> ppp b / Telegram / SourceFiles / ui / widgets / popup_menu . cpp <nl> PopupMenu : : PopupMenu ( QWidget * , QMenu * menu , const style : : PopupMenu & st ) : TWidge <nl> <nl> for ( auto action : actions ( ) ) { <nl> if ( auto submenu = action - > menu ( ) ) { <nl> - auto it = _submenus . insert ( action , new PopupMenu ( submenu , st ) ) ; <nl> + auto it = _submenus . insert ( action , new PopupMenu ( nullptr , submenu , st ) ) ; <nl> it . value ( ) - > deleteOnHide ( false ) ; <nl> } <nl> } <nl>
Fixed Ui : : PopupMenu submenu creation .
telegramdesktop/tdesktop
3c18532612bb7e3e72e264a758b53c4e5266d8cf
2017-02-09T14:20:49Z
mmm a / src / builtins / builtins - internal - gen . cc <nl> ppp b / src / builtins / builtins - internal - gen . cc <nl> class RecordWriteCodeStubAssembler : public CodeStubAssembler { <nl> store_buffer_top_addr , new_store_buffer_top ) ; <nl> <nl> Node * test = WordAnd ( new_store_buffer_top , <nl> - IntPtrConstant ( StoreBuffer : : kStoreBufferMask ) ) ; <nl> + IntPtrConstant ( Heap : : store_buffer_mask_constant ( ) ) ) ; <nl> <nl> Label overflow ( this ) ; <nl> Branch ( WordEqual ( test , IntPtrConstant ( 0 ) ) , & overflow , next ) ; <nl> mmm a / src / external - reference . cc <nl> ppp b / src / external - reference . cc <nl> ExternalReference : : incremental_marking_record_write_function ( ) { <nl> <nl> ExternalReference ExternalReference : : store_buffer_overflow_function ( ) { <nl> return ExternalReference ( <nl> - Redirect ( FUNCTION_ADDR ( StoreBuffer : : StoreBufferOverflow ) ) ) ; <nl> + Redirect ( Heap : : store_buffer_overflow_function_address ( ) ) ) ; <nl> } <nl> <nl> ExternalReference ExternalReference : : delete_handle_scope_extensions ( ) { <nl> mmm a / src / heap / array - buffer - collector . cc <nl> ppp b / src / heap / array - buffer - collector . cc <nl> <nl> <nl> # include " src / base / template - utils . h " <nl> # include " src / heap / array - buffer - tracker . h " <nl> + # include " src / heap / gc - tracer . h " <nl> # include " src / heap / heap - inl . h " <nl> <nl> namespace v8 { <nl> mmm a / src / heap / code - stats . cc <nl> ppp b / src / heap / code - stats . cc <nl> <nl> / / found in the LICENSE file . <nl> <nl> # include " src / heap / code - stats . h " <nl> + <nl> # include " src / objects - inl . h " <nl> + # include " src / reloc - info . h " <nl> <nl> namespace v8 { <nl> namespace internal { <nl> mmm a / src / heap / factory - inl . h <nl> ppp b / src / heap / factory - inl . h <nl> <nl> <nl> # include " src / heap / factory . h " <nl> <nl> + / / Clients of this interface shouldn ' t depend on lots of heap internals . <nl> + / / Do not include anything from src / heap here ! <nl> # include " src / handles - inl . h " <nl> # include " src / objects - inl . h " <nl> # include " src / string - hasher . h " <nl> mmm a / src / heap / factory . h <nl> ppp b / src / heap / factory . h <nl> <nl> # ifndef V8_HEAP_FACTORY_H_ <nl> # define V8_HEAP_FACTORY_H_ <nl> <nl> + / / Clients of this interface shouldn ' t depend on lots of heap internals . <nl> + / / Do not include anything from src / heap here ! <nl> # include " src / builtins / builtins . h " <nl> # include " src / globals . h " <nl> # include " src / handles . h " <nl> mmm a / src / heap / heap - inl . h <nl> ppp b / src / heap / heap - inl . h <nl> <nl> # include " src / counters - inl . h " <nl> # include " src / feedback - vector . h " <nl> <nl> - / / TODO ( mstarzinger ) : There are 3 more includes to remove in order to no longer <nl> + / / TODO ( mstarzinger ) : There is one more include to remove in order to no longer <nl> / / leak heap internals to users of this interface ! <nl> - # include " src / heap / incremental - marking - inl . h " <nl> # include " src / heap / spaces - inl . h " <nl> - # include " src / heap / store - buffer - inl . h " <nl> # include " src / isolate . h " <nl> # include " src / log . h " <nl> # include " src / msan . h " <nl> bool Heap : : ShouldBePromoted ( Address old_address ) { <nl> ( ! 
page - > ContainsLimit ( age_mark ) | | old_address < age_mark ) ; <nl> } <nl> <nl> - Address * Heap : : store_buffer_top_address ( ) { <nl> - return store_buffer ( ) - > top_address ( ) ; <nl> - } <nl> - <nl> void Heap : : CopyBlock ( Address dst , Address src , int byte_size ) { <nl> CopyWords ( reinterpret_cast < Object * * > ( dst ) , reinterpret_cast < Object * * > ( src ) , <nl> static_cast < size_t > ( byte_size / kPointerSize ) ) ; <nl> mmm a / src / heap / heap . cc <nl> ppp b / src / heap / heap . cc <nl> void Heap : : CheckHandleCount ( ) { <nl> isolate_ - > handle_scope_implementer ( ) - > Iterate ( & v ) ; <nl> } <nl> <nl> + Address * Heap : : store_buffer_top_address ( ) { <nl> + return store_buffer ( ) - > top_address ( ) ; <nl> + } <nl> + <nl> + / / static <nl> + intptr_t Heap : : store_buffer_mask_constant ( ) { <nl> + return StoreBuffer : : kStoreBufferMask ; <nl> + } <nl> + <nl> + / / static <nl> + Address Heap : : store_buffer_overflow_function_address ( ) { <nl> + return FUNCTION_ADDR ( StoreBuffer : : StoreBufferOverflow ) ; <nl> + } <nl> + <nl> void Heap : : ClearRecordedSlot ( HeapObject * object , Object * * slot ) { <nl> Address slot_addr = reinterpret_cast < Address > ( slot ) ; <nl> Page * page = Page : : FromAddress ( slot_addr ) ; <nl> mmm a / src / heap / heap . h <nl> ppp b / src / heap / heap . h <nl> class Heap { <nl> <nl> void SetIsMarkingFlag ( uint8_t flag ) { is_marking_flag_ = flag ; } <nl> <nl> - inline Address * store_buffer_top_address ( ) ; <nl> + Address * store_buffer_top_address ( ) ; <nl> + static intptr_t store_buffer_mask_constant ( ) ; <nl> + static Address store_buffer_overflow_function_address ( ) ; <nl> <nl> void ClearRecordedSlot ( HeapObject * object , Object * * slot ) ; <nl> void ClearRecordedSlotRange ( Address start , Address end ) ; <nl> mmm a / src / heap / incremental - marking . cc <nl> ppp b / src / heap / incremental - marking . cc <nl> <nl> # include " src / heap / gc - idle - time - handler . h " <nl> # include " src / heap / gc - tracer . h " <nl> # include " src / heap / heap - inl . h " <nl> + # include " src / heap / incremental - marking - inl . h " <nl> # include " src / heap / mark - compact - inl . h " <nl> # include " src / heap / object - stats . h " <nl> # include " src / heap / objects - visiting - inl . h " <nl> mmm a / src / heap / spaces . cc <nl> ppp b / src / heap / spaces . cc <nl> <nl> # include " src / heap / heap - controller . h " <nl> # include " src / heap / incremental - marking . h " <nl> # include " src / heap / mark - compact . h " <nl> + # include " src / heap / remembered - set . h " <nl> # include " src / heap / slot - set . h " <nl> # include " src / heap / sweeper . h " <nl> # include " src / msan . h " <nl> mmm a / src / objects / maybe - object - inl . h <nl> ppp b / src / objects / maybe - object - inl . h <nl> <nl> # ifndef V8_OBJECTS_MAYBE_OBJECT_INL_H_ <nl> # define V8_OBJECTS_MAYBE_OBJECT_INL_H_ <nl> <nl> - # include " src / objects - inl . h " <nl> # include " src / objects / maybe - object . h " <nl> <nl> + # include " src / objects - inl . h " <nl> + <nl> namespace v8 { <nl> namespace internal { <nl> <nl> mmm a / test / cctest / heap / test - heap . cc <nl> ppp b / test / cctest / heap / test - heap . cc <nl> <nl> # include " src / heap / incremental - marking . h " <nl> # include " src / heap / mark - compact . h " <nl> # include " src / heap / memory - reducer . h " <nl> + # include " src / heap / remembered - set . h " <nl> # include " src / ic / ic . 
h " <nl> # include " src / macro - assembler - inl . h " <nl> # include " src / objects - inl . h " <nl>
[ heap ] [ cleanup ] Avoid exposing store - buffer internals .
v8/v8
60408d97ab2207d15e0575ff89320eca4e23e7f2
2018-08-20T14:21:26Z
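The pattern throughout this commit is to reach store-buffer constants and function addresses through out-of-line Heap accessors, so callers such as builtins-internal-gen.cc and external-reference.cc no longer include the src/heap internals headers. A minimal sketch of that layering, with stand-in types and a hypothetical mask value:

#include <cstdint>
#include <cstdio>

// Stand-in for the internal header that heap.h no longer needs to include.
struct StoreBuffer {
  static const std::intptr_t kStoreBufferMask = 0x7fff;  // hypothetical value
};

// Public surface: the constant is reached through an out-of-line static
// accessor, so only heap.cc (not every includer of heap.h) sees StoreBuffer.
class Heap {
 public:
  static std::intptr_t store_buffer_mask_constant();
};

std::intptr_t Heap::store_buffer_mask_constant() {
  return StoreBuffer::kStoreBufferMask;
}

int main() {
  std::printf("%ld\n", static_cast<long>(Heap::store_buffer_mask_constant()));
}

Moving the accessor bodies out of line is what allows heap-inl.h to drop its incremental-marking and store-buffer includes, at the cost of a call that can no longer be inlined by includers.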
mmm a / cmake / OpenCVFindLibsGrfmt . cmake <nl> ppp b / cmake / OpenCVFindLibsGrfmt . cmake <nl> <nl> if ( BUILD_ZLIB ) <nl> ocv_clear_vars ( ZLIB_FOUND ) <nl> else ( ) <nl> - include ( FindZLIB ) <nl> + find_package ( ZLIB " $ { MIN_VER_ZLIB } " ) <nl> if ( ZLIB_FOUND AND ANDROID ) <nl> if ( ZLIB_LIBRARIES STREQUAL " $ { ANDROID_SYSROOT } / usr / lib / libz . so " ) <nl> set ( ZLIB_LIBRARIES z ) <nl> mmm a / cmake / OpenCVMinDepVersions . cmake <nl> ppp b / cmake / OpenCVMinDepVersions . cmake <nl> @ @ - 1 + 1 , 2 @ @ <nl> set ( MIN_VER_CMAKE 2 . 8 . 7 ) <nl> + set ( MIN_VER_ZLIB 1 . 2 . 3 ) <nl>
Set minimal zlib version to 1 . 2 . 3 .
opencv/opencv
9c01a96b14655ae043888564cf12e6c6bc41edbd
2013-08-22T14:17:19Z
mmm a / test / unittest / readertest . cpp <nl> ppp b / test / unittest / readertest . cpp <nl> TEST ( Reader , IterativeParsing_ErrorHandling ) { <nl> TESTERRORHANDLING ( " { \ " a \ " } " , kParseErrorObjectMissColon , 4u ) ; <nl> TESTERRORHANDLING ( " { \ " a \ " : 1 " , kParseErrorObjectMissCommaOrCurlyBracket , 7u ) ; <nl> TESTERRORHANDLING ( " [ 1 2 3 ] " , kParseErrorArrayMissCommaOrSquareBracket , 3u ) ; <nl> + TESTERRORHANDLING ( " { \ " a : 1 " , kParseErrorStringMissQuotationMark , 5u ) ; <nl> <nl> / / Any JSON value can be a valid root element in RFC7159 . <nl> TESTERRORHANDLING ( " \ " ab " , kParseErrorStringMissQuotationMark , 2u ) ; <nl>
Add unittest for state transition to IterativeParsingMemberKeyState .
Tencent/rapidjson
857674737345c5aa2b4251480455578f1787d62f
2015-04-15T06:51:48Z
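The added case feeds the parser a member key whose closing quote is missing, expecting kParseErrorStringMissQuotationMark at offset 5. A small standalone driver showing the same error surfaced through rapidjson's public DOM API (the test itself targets the iterative parser, but the default parser reports the same error code for this input):

#include <cstdio>
#include "rapidjson/document.h"
#include "rapidjson/error/en.h"

int main() {
  rapidjson::Document d;
  d.Parse("{\"a:1");  // key string is never terminated
  if (d.HasParseError()) {
    std::printf("parse error at offset %u: %s\n",
                static_cast<unsigned>(d.GetErrorOffset()),
                rapidjson::GetParseError_En(d.GetParseError()));
  }
}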
mmm a / tests / queries / 0_stateless / 01514_empty_buffer_different_types . reference <nl> ppp b / tests / queries / 0_stateless / 01514_empty_buffer_different_types . reference <nl> @ @ - 0 , 0 + 1 @ @ <nl> + a <nl> mmm a / tests / queries / 0_stateless / 01514_empty_buffer_different_types . sql <nl> ppp b / tests / queries / 0_stateless / 01514_empty_buffer_different_types . sql <nl> <nl> set send_logs_level = ' error ' ; <nl> <nl> DROP TABLE IF EXISTS merge_tree_table1 ; <nl> - CREATE TABLE merge_tree_table1 ( ` s ` LowCardinality ( String ) ) ENGINE = MergeTree ORDER BY tuple ( ) ; <nl> - CREATE TABLE buffer_table1 ( ` s ` String ) ENGINE = Buffer ( currentDatabase ( ) , ' merge_tree_table1 ' , 16 , 10 , 60 , 10 , 1000 , 1048576 , 2097152 ) ; <nl> - SELECT * FROM buffer_table1 ; <nl> + CREATE TABLE merge_tree_table1 ( ` s ` LowCardinality ( String ) , x UInt32 ) ENGINE = MergeTree ORDER BY x settings index_granularity = 1 ; <nl> + CREATE TABLE buffer_table1 ( ` s ` String , x UInt32 ) ENGINE = Buffer ( currentDatabase ( ) , ' merge_tree_table1 ' , 16 , 10 , 60 , 10 , 1000 , 1048576 , 2097152 ) ; <nl> + SELECT s FROM buffer_table1 ; <nl> + <nl> + insert into merge_tree_table1 values ( ' a ' , 1 ) ; <nl> + select s from buffer_table1 where x = 1 ; <nl> + select s from buffer_table1 where x = 2 ; <nl> <nl> DROP TABLE IF EXISTS merge_tree_table1 ; <nl>
Update test .
ClickHouse/ClickHouse
563ad62e1c7e5f2ff528464d646e4e0e01a12005
2020-10-06T10:44:05Z
mmm a / include / swift / AST / ASTContext . h <nl> ppp b / include / swift / AST / ASTContext . h <nl> class ASTContext final { <nl> / / / Each kind and SourceFile has its own cache for a Type . <nl> Type & getDefaultTypeRequestCache ( SourceFile * , KnownProtocolKind ) ; <nl> <nl> + public : <nl> + / / / Require that the library intrinsics for working with Optional < T > <nl> + / / / exist . <nl> + bool requireOptionalIntrinsics ( SourceLoc loc ) ; <nl> + <nl> + / / / Require that the library intrinsics for working with <nl> + / / / UnsafeMutablePointer < T > exist . <nl> + bool requirePointerArgumentIntrinsics ( SourceLoc loc ) ; <nl> + <nl> + / / / Require that the library intrinsics for creating <nl> + / / / array literals exist . <nl> + bool requireArrayLiteralIntrinsics ( SourceLoc loc ) ; <nl> + <nl> private : <nl> friend Decl ; <nl> Optional < RawComment > getRawComment ( const Decl * D ) ; <nl> mmm a / lib / AST / ASTContext . cpp <nl> ppp b / lib / AST / ASTContext . cpp <nl> void ASTContext : : setSideCachedPropertyWrapperBackingPropertyType ( <nl> getImpl ( ) . PropertyWrapperBackingVarTypes [ var ] = type ; <nl> } <nl> <nl> + bool ASTContext : : requireOptionalIntrinsics ( SourceLoc loc ) { <nl> + if ( hasOptionalIntrinsics ( ) ) <nl> + return false ; <nl> + <nl> + Diags . diagnose ( loc , diag : : optional_intrinsics_not_found ) ; <nl> + return true ; <nl> + } <nl> + <nl> + bool ASTContext : : requirePointerArgumentIntrinsics ( SourceLoc loc ) { <nl> + if ( hasPointerArgumentIntrinsics ( ) ) <nl> + return false ; <nl> + <nl> + Diags . diagnose ( loc , diag : : pointer_argument_intrinsics_not_found ) ; <nl> + return true ; <nl> + } <nl> + <nl> + bool ASTContext : : requireArrayLiteralIntrinsics ( SourceLoc loc ) { <nl> + if ( hasArrayLiteralIntrinsics ( ) ) <nl> + return false ; <nl> + <nl> + Diags . diagnose ( loc , diag : : array_literal_intrinsics_not_found ) ; <nl> + return true ; <nl> + } <nl> + <nl> VarDecl * VarDecl : : getOriginalWrappedProperty ( <nl> Optional < PropertyWrapperSynthesizedPropertyKind > kind ) const { <nl> if ( ! Bits . VarDecl . IsPropertyWrapperBackingProperty ) <nl> mmm a / lib / Sema / CSGen . cpp <nl> ppp b / lib / Sema / CSGen . cpp <nl> namespace { <nl> <nl> case MagicIdentifierLiteralExpr : : DSOHandle : { <nl> / / # dsohandle has type UnsafeMutableRawPointer . <nl> - auto & tc = CS . getTypeChecker ( ) ; <nl> - if ( tc . requirePointerArgumentIntrinsics ( expr - > getLoc ( ) ) ) <nl> + auto & ctx = CS . getASTContext ( ) ; <nl> + if ( ctx . requirePointerArgumentIntrinsics ( expr - > getLoc ( ) ) ) <nl> return nullptr ; <nl> <nl> - auto unsafeRawPointer = <nl> - CS . getASTContext ( ) . getUnsafeRawPointerDecl ( ) ; <nl> + auto unsafeRawPointer = ctx . getUnsafeRawPointerDecl ( ) ; <nl> return unsafeRawPointer - > getDeclaredType ( ) ; <nl> } <nl> } <nl> namespace { <nl> / / / worth QoI efforts . <nl> Type getOptionalType ( SourceLoc optLoc , Type valueTy ) { <nl> auto optTy = CS . getTypeChecker ( ) . getOptionalType ( optLoc , valueTy ) ; <nl> - if ( ! optTy | | CS . getTypeChecker ( ) . requireOptionalIntrinsics ( optLoc ) ) <nl> + if ( ! optTy | | CS . getASTContext ( ) . requireOptionalIntrinsics ( optLoc ) ) <nl> return Type ( ) ; <nl> <nl> return optTy ; <nl> mmm a / lib / Sema / TypeCheckExpr . cpp <nl> ppp b / lib / Sema / TypeCheckExpr . cpp <nl> static Expr * foldSequence ( DeclContext * DC , <nl> return makeBinOp ( Ctx , op1 . op , LHS , RHS , op1 . precedence , S . 
empty ( ) ) ; <nl> } <nl> <nl> - bool TypeChecker : : requireOptionalIntrinsics ( SourceLoc loc ) { <nl> - if ( Context . hasOptionalIntrinsics ( ) ) return false ; <nl> - <nl> - diagnose ( loc , diag : : optional_intrinsics_not_found ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool TypeChecker : : requirePointerArgumentIntrinsics ( SourceLoc loc ) { <nl> - if ( Context . hasPointerArgumentIntrinsics ( ) ) return false ; <nl> - <nl> - diagnose ( loc , diag : : pointer_argument_intrinsics_not_found ) ; <nl> - return true ; <nl> - } <nl> - <nl> - bool TypeChecker : : requireArrayLiteralIntrinsics ( SourceLoc loc ) { <nl> - if ( Context . hasArrayLiteralIntrinsics ( ) ) return false ; <nl> - <nl> - diagnose ( loc , diag : : array_literal_intrinsics_not_found ) ; <nl> - return true ; <nl> - } <nl> - <nl> Expr * TypeChecker : : buildCheckedRefExpr ( VarDecl * value , DeclContext * UseDC , <nl> DeclNameLoc loc , bool Implicit ) { <nl> auto type = TypeChecker : : getUnopenedTypeOfReference ( value , Type ( ) , UseDC ) ; <nl> mmm a / lib / Sema / TypeCheckStmt . cpp <nl> ppp b / lib / Sema / TypeCheckStmt . cpp <nl> class StmtChecker : public StmtVisitor < StmtChecker , Stmt * > { <nl> } <nl> <nl> / / Working with iterators requires Optional . <nl> - if ( TC . requireOptionalIntrinsics ( S - > getForLoc ( ) ) ) <nl> + if ( getASTContext ( ) . requireOptionalIntrinsics ( S - > getForLoc ( ) ) ) <nl> return nullptr ; <nl> <nl> / / Gather the witnesses from the Iterator protocol conformance , which <nl> mmm a / lib / Sema / TypeChecker . h <nl> ppp b / lib / Sema / TypeChecker . h <nl> class TypeChecker final { <nl> std : : function < Type ( Expr * ) > getType , <nl> std : : function < void ( Expr * , Type ) > setType ) ; <nl> <nl> - / / / Require that the library intrinsics for working with Optional < T > <nl> - / / / exist . <nl> - bool requireOptionalIntrinsics ( SourceLoc loc ) ; <nl> - <nl> - / / / Require that the library intrinsics for working with <nl> - / / / UnsafeMutablePointer < T > exist . <nl> - bool requirePointerArgumentIntrinsics ( SourceLoc loc ) ; <nl> - <nl> - / / / Require that the library intrinsics for creating <nl> - / / / array literals exist . <nl> - bool requireArrayLiteralIntrinsics ( SourceLoc loc ) ; <nl> - <nl> / / / Determine whether the given type contains the given protocol . <nl> / / / <nl> / / / \ param DC The context in which to check conformance . This affects , for <nl>
Move the diagnosing entrypoints for intrinsics
apple/swift
51b17470e3acce302718e106c96495d0a6359004
2019-11-06T23:08:59Z
mmm a / lib / Driver / Driver . cpp <nl> ppp b / lib / Driver / Driver . cpp <nl> getDriverBatchCount ( llvm : : opt : : InputArgList & ArgList , <nl> return None ; <nl> } <nl> <nl> + static bool computeIncremental ( const llvm : : opt : : InputArgList * ArgList , <nl> + const bool ShowIncrementalBuildDecisions ) { <nl> + if ( ! ArgList - > hasArg ( options : : OPT_incremental ) ) <nl> + return false ; <nl> + <nl> + const char * ReasonToDisable = <nl> + ArgList - > hasArg ( options : : OPT_whole_module_optimization ) <nl> + ? " is not compatible with whole module optimization . " <nl> + : ArgList - > hasArg ( options : : OPT_embed_bitcode ) <nl> + ? " is not currently compatible with embedding LLVM IR bitcode . " <nl> + : nullptr ; <nl> + <nl> + if ( ! ReasonToDisable ) <nl> + return true ; <nl> + <nl> + if ( ShowIncrementalBuildDecisions ) { <nl> + llvm : : outs ( ) < < " Incremental compilation has been disabled , because it " <nl> + < < ReasonToDisable ; <nl> + } <nl> + return false ; <nl> + } <nl> + <nl> + static std : : string <nl> + computeWorkingDirectory ( const llvm : : opt : : InputArgList * ArgList ) { <nl> + if ( auto * A = ArgList - > getLastArg ( options : : OPT_working_directory ) ) { <nl> + SmallString < 128 > workingDirectory ; <nl> + workingDirectory = A - > getValue ( ) ; <nl> + llvm : : sys : : fs : : make_absolute ( workingDirectory ) ; <nl> + std : : string result = workingDirectory . str ( ) . str ( ) ; <nl> + return result ; <nl> + } <nl> + return std : : string ( ) ; <nl> + } <nl> + <nl> + static std : : unique_ptr < UnifiedStatsReporter > <nl> + createStatsReporter ( const llvm : : opt : : InputArgList * ArgList , <nl> + const InputFileList & Inputs , const OutputInfo OI , <nl> + StringRef DefaultTargetTriple ) { <nl> + const Arg * A = ArgList - > getLastArgNoClaim ( options : : OPT_stats_output_dir ) ; <nl> + if ( ! A ) <nl> + return nullptr ; <nl> + <nl> + StringRef OptType ; <nl> + if ( const Arg * OptA = ArgList - > getLastArgNoClaim ( options : : OPT_O_Group ) ) { <nl> + OptType = OptA - > getSpelling ( ) ; <nl> + } <nl> + StringRef InputName ; <nl> + if ( Inputs . size ( ) = = 1 ) { <nl> + InputName = Inputs [ 0 ] . second - > getSpelling ( ) ; <nl> + } <nl> + StringRef OutputType = file_types : : getTypeTempSuffix ( OI . CompilerOutputType ) ; <nl> + return llvm : : make_unique < UnifiedStatsReporter > ( " swift - driver " , <nl> + OI . ModuleName , <nl> + InputName , <nl> + DefaultTargetTriple , <nl> + OutputType , <nl> + OptType , <nl> + A - > getValue ( ) ) ; <nl> + } <nl> + <nl> + static bool <nl> + computeContinueBuildingAfterErrors ( const bool BatchMode , <nl> + const llvm : : opt : : InputArgList * ArgList ) { <nl> + / / Note : Batch mode handling of serialized diagnostics requires that all <nl> + / / batches get to run , in order to make sure that all diagnostics emitted <nl> + / / during the compilation end up in at least one serialized diagnostic file . <nl> + / / Therefore , treat batch mode as implying - continue - building - after - errors . <nl> + / / ( This behavior could be limited to only when serialized diagnostics are <nl> + / / being emitted , but this seems more consistent and less surprising for <nl> + / / users . ) <nl> + / / FIXME : We don ' t really need ( or want ) a full ContinueBuildingAfterErrors . <nl> + / / If we fail to precompile a bridging header , for example , there ' s no need <nl> + / / to go on to compilation of source files , and if compilation of source files <nl> + / / fails , we shouldn ' t try to link . 
Instead , we ' d want to let all jobs finish <nl> + / / but not schedule any new ones . <nl> + return BatchMode | | <nl> + ArgList - > hasArg ( options : : OPT_continue_building_after_errors ) ; <nl> + <nl> + } <nl> + <nl> std : : unique_ptr < Compilation > <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> std : : unique_ptr < llvm : : opt : : InputArgList > ArgList ) { <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> / / Claim - - driver - mode here , since it ' s already been handled . <nl> ( void ) ArgList - > hasArg ( options : : OPT_driver_mode ) ; <nl> <nl> - bool DriverPrintActions = ArgList - > hasArg ( options : : OPT_driver_print_actions ) ; <nl> - bool DriverPrintOutputFileMap = <nl> - ArgList - > hasArg ( options : : OPT_driver_print_output_file_map ) ; <nl> - bool DriverPrintDerivedOutputFileMap = <nl> - ArgList - > hasArg ( options : : OPT_driver_print_derived_output_file_map ) ; <nl> DriverPrintBindings = ArgList - > hasArg ( options : : OPT_driver_print_bindings ) ; <nl> - bool ShowIncrementalBuildDecisions = <nl> - ArgList - > hasArg ( options : : OPT_driver_show_incremental ) ; <nl> - bool ShowJobLifecycle = <nl> - ArgList - > hasArg ( options : : OPT_driver_show_job_lifecycle ) ; <nl> - unsigned DriverBatchSeed = getDriverBatchSeed ( * ArgList , Diags ) ; <nl> - Optional < unsigned > DriverBatchCount = getDriverBatchCount ( * ArgList , Diags ) ; <nl> - bool DriverForceOneBatchRepartition = <nl> - ArgList - > hasArg ( options : : OPT_driver_force_one_batch_repartition ) ; <nl> - <nl> - bool Incremental = ArgList - > hasArg ( options : : OPT_incremental ) ; <nl> - if ( ArgList - > hasArg ( options : : OPT_whole_module_optimization ) ) { <nl> - if ( Incremental & & ShowIncrementalBuildDecisions ) { <nl> - llvm : : outs ( ) < < " Incremental compilation has been disabled , because it " <nl> - < < " is not compatible with whole module optimization . " ; <nl> - } <nl> - Incremental = false ; <nl> - } <nl> - if ( ArgList - > hasArg ( options : : OPT_embed_bitcode ) ) { <nl> - if ( Incremental & & ShowIncrementalBuildDecisions ) { <nl> - llvm : : outs ( ) < < " Incremental compilation has been disabled , because it " <nl> - < < " is not currently compatible with embedding LLVM IR " <nl> - < < " bitcode . " ; <nl> - } <nl> - Incremental = false ; <nl> - } <nl> - <nl> - bool SaveTemps = ArgList - > hasArg ( options : : OPT_save_temps ) ; <nl> - bool ShowDriverTimeCompilation = <nl> - ArgList - > hasArg ( options : : OPT_driver_time_compilation ) ; <nl> <nl> - SmallString < 128 > workingDirectory ; <nl> - if ( auto * A = ArgList - > getLastArg ( options : : OPT_working_directory ) ) { <nl> - workingDirectory = A - > getValue ( ) ; <nl> - llvm : : sys : : fs : : make_absolute ( workingDirectory ) ; <nl> - } <nl> + const std : : string workingDirectory = computeWorkingDirectory ( ArgList . get ( ) ) ; <nl> <nl> std : : unique_ptr < DerivedArgList > TranslatedArgList ( <nl> translateInputAndPathArgs ( * ArgList , workingDirectory ) ) ; <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> OI . CompilerMode = computeCompilerMode ( * TranslatedArgList , Inputs , BatchMode ) ; <nl> buildOutputInfo ( TC , * TranslatedArgList , BatchMode , Inputs , OI ) ; <nl> <nl> - / / Note : Batch mode handling of serialized diagnostics requires that all <nl> - / / batches get to run , in order to make sure that all diagnostics emitted <nl> - / / during the compilation end up in at least one serialized diagnostic file . 
<nl> - / / Therefore , treat batch mode as implying - continue - building - after - errors . <nl> - / / ( This behavior could be limited to only when serialized diagnostics are <nl> - / / being emitted , but this seems more consistent and less surprising for <nl> - / / users . ) <nl> - / / FIXME : We don ' t really need ( or want ) a full ContinueBuildingAfterErrors . <nl> - / / If we fail to precompile a bridging header , for example , there ' s no need <nl> - / / to go on to compilation of source files , and if compilation of source files <nl> - / / fails , we shouldn ' t try to link . Instead , we ' d want to let all jobs finish <nl> - / / but not schedule any new ones . <nl> - const bool ContinueBuildingAfterErrors = <nl> - BatchMode | | ArgList - > hasArg ( options : : OPT_continue_building_after_errors ) ; <nl> - <nl> if ( Diags . hadAnyError ( ) ) <nl> return nullptr ; <nl> <nl> - std : : unique_ptr < UnifiedStatsReporter > StatsReporter ; <nl> - if ( const Arg * A = <nl> - ArgList - > getLastArgNoClaim ( options : : OPT_stats_output_dir ) ) { <nl> - StringRef OptType ; <nl> - if ( const Arg * OptA = ArgList - > getLastArgNoClaim ( options : : OPT_O_Group ) ) { <nl> - OptType = OptA - > getSpelling ( ) ; <nl> - } <nl> - StringRef InputName ; <nl> - if ( Inputs . size ( ) = = 1 ) { <nl> - InputName = Inputs [ 0 ] . second - > getSpelling ( ) ; <nl> - } <nl> - StringRef OutputType = file_types : : getTypeTempSuffix ( OI . CompilerOutputType ) ; <nl> - StatsReporter = llvm : : make_unique < UnifiedStatsReporter > ( " swift - driver " , <nl> - OI . ModuleName , <nl> - InputName , <nl> - DefaultTargetTriple , <nl> - OutputType , <nl> - OptType , <nl> - A - > getValue ( ) ) ; <nl> - } <nl> - <nl> assert ( OI . CompilerOutputType ! = file_types : : ID : : TY_INVALID & & <nl> " buildOutputInfo ( ) must set a valid output type ! " ) ; <nl> <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> if ( Diags . hadAnyError ( ) ) <nl> return nullptr ; <nl> <nl> - if ( DriverPrintOutputFileMap ) { <nl> + if ( ArgList - > hasArg ( options : : OPT_driver_print_output_file_map ) ) { <nl> if ( OFM ) <nl> OFM - > dump ( llvm : : errs ( ) , true ) ; <nl> else <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> return nullptr ; <nl> } <nl> <nl> + const bool ShowIncrementalBuildDecisions = <nl> + ArgList - > hasArg ( options : : OPT_driver_show_incremental ) ; <nl> + const bool Incremental = <nl> + computeIncremental ( ArgList . get ( ) , ShowIncrementalBuildDecisions ) ; <nl> + <nl> std : : string buildRecordPath ; <nl> bool outputBuildRecordForModuleOnlyBuild = false ; <nl> getCompilationRecordPath ( buildRecordPath , outputBuildRecordForModuleOnlyBuild , <nl> Driver : : buildCompilation ( const ToolChain & TC , <nl> llvm_unreachable ( " Unknown OutputLevel argument ! " ) ; <nl> } <nl> <nl> - std : : unique_ptr < Compilation > C ( <nl> - new Compilation ( Diags , TC , OI , Level , <nl> - std : : move ( ArgList ) , <nl> - std : : move ( TranslatedArgList ) , <nl> - std : : move ( Inputs ) , <nl> - buildRecordPath , <nl> - outputBuildRecordForModuleOnlyBuild , <nl> - ArgsHash , <nl> - StartTime , <nl> - LastBuildTime , <nl> - DriverFilelistThreshold , <nl> - Incremental , <nl> - BatchMode , <nl> - DriverBatchSeed , <nl> - DriverBatchCount , <nl> - DriverForceOneBatchRepartition , <nl> - SaveTemps , <nl> - ShowDriverTimeCompilation , <nl> - std : : move ( StatsReporter ) ) ) ; <nl> + <nl> + / / About to move argument list , so capture some flags that will be needed <nl> + / / later . 
<nl> + const bool DriverPrintActions = <nl> + ArgList - > hasArg ( options : : OPT_driver_print_actions ) ; <nl> + const bool DriverPrintDerivedOutputFileMap = <nl> + ArgList - > hasArg ( options : : OPT_driver_print_derived_output_file_map ) ; <nl> + const bool ContinueBuildingAfterErrors = <nl> + computeContinueBuildingAfterErrors ( BatchMode , ArgList . get ( ) ) ; <nl> + const bool ShowJobLifecycle = <nl> + ArgList - > hasArg ( options : : OPT_driver_show_job_lifecycle ) ; <nl> + <nl> + / / In order to confine the values below , while still moving the argument <nl> + / / list , and preserving the interface to Compilation , enclose the call to the <nl> + / / constructor in a block : <nl> + std : : unique_ptr < Compilation > C ; <nl> + { <nl> + const unsigned DriverBatchSeed = getDriverBatchSeed ( * ArgList , Diags ) ; <nl> + const Optional < unsigned > DriverBatchCount = getDriverBatchCount ( * ArgList , Diags ) ; <nl> + const bool DriverForceOneBatchRepartition = <nl> + ArgList - > hasArg ( options : : OPT_driver_force_one_batch_repartition ) ; <nl> + const bool SaveTemps = ArgList - > hasArg ( options : : OPT_save_temps ) ; <nl> + const bool ShowDriverTimeCompilation = <nl> + ArgList - > hasArg ( options : : OPT_driver_time_compilation ) ; <nl> + std : : unique_ptr < UnifiedStatsReporter > StatsReporter = <nl> + createStatsReporter ( ArgList . get ( ) , Inputs , OI , DefaultTargetTriple ) ; <nl> + <nl> + C = llvm : : make_unique < Compilation > ( <nl> + Diags , TC , OI , Level , <nl> + std : : move ( ArgList ) , <nl> + std : : move ( TranslatedArgList ) , <nl> + std : : move ( Inputs ) , <nl> + buildRecordPath , <nl> + outputBuildRecordForModuleOnlyBuild , <nl> + ArgsHash , <nl> + StartTime , <nl> + LastBuildTime , <nl> + DriverFilelistThreshold , <nl> + Incremental , <nl> + BatchMode , <nl> + DriverBatchSeed , <nl> + DriverBatchCount , <nl> + DriverForceOneBatchRepartition , <nl> + SaveTemps , <nl> + ShowDriverTimeCompilation , <nl> + std : : move ( StatsReporter ) ) ; <nl> + } <nl> <nl> / / Construct the graph of Actions . <nl> SmallVector < const Action * , 8 > TopLevelActions ; <nl>
Merge pull request from davidungar / buildCompilation - refactor - 1
apple/swift
c1c9cb8f5dceb65f0d159b9d085374eecedd2288
2018-06-27T20:57:34Z
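Most of the diff is mechanical extraction: each block of flag logic in buildCompilation becomes a pure helper such as computeIncremental, which derives the single reason a feature must be disabled and branches once on it. A compact C++ sketch of that shape, with hypothetical bool parameters standing in for the real ArgList queries:

#include <cstdio>

// Derive the one reason to disable, then decide; the message text mirrors
// the driver's output above.
static bool computeIncremental(bool incrementalFlag, bool wholeModuleOpt,
                               bool embedBitcode, bool showDecisions) {
  if (!incrementalFlag) return false;
  const char* reasonToDisable =
      wholeModuleOpt ? "is not compatible with whole module optimization."
    : embedBitcode   ? "is not currently compatible with embedding LLVM IR bitcode."
    : nullptr;
  if (!reasonToDisable) return true;
  if (showDecisions)
    std::printf("Incremental compilation has been disabled, because it %s\n",
                reasonToDisable);
  return false;
}

int main() {
  // With incremental and whole-module-optimization both set, expect 0.
  std::printf("%d\n", computeIncremental(true, true, false, true));
}

Extracting the helpers also lets the remaining locals be scoped to the block that builds the Compilation, which is why the constructor call is now wrapped in braces.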
mmm a / ci / lint / 04_install . sh <nl> ppp b / ci / lint / 04_install . sh <nl> export LC_ALL = C <nl> <nl> travis_retry pip3 install codespell = = 1 . 15 . 0 <nl> travis_retry pip3 install flake8 = = 3 . 7 . 8 <nl> - travis_retry pip3 install vulture = = 0 . 29 <nl> + travis_retry pip3 install vulture = = 1 . 0 <nl> <nl> SHELLCHECK_VERSION = v0 . 6 . 0 <nl> curl - s " https : / / storage . googleapis . com / shellcheck / shellcheck - $ { SHELLCHECK_VERSION } . linux . x86_64 . tar . xz " | tar - - xz - xf - - - directory / tmp / <nl> mmm a / test / lint / lint - python - dead - code - whitelist <nl> ppp b / test / lint / lint - python - dead - code - whitelist <nl> _ . converter # unused attribute ( test / functional / test_framework / test_framework . p <nl> _ . daemon # unused attribute ( test / functional / test_framework / socks5 . py ) <nl> data_received # unused function ( test / functional / test_framework / mininode . py ) <nl> DuplicateInput # unused class ( test / functional / data / invalid_txs . py ) <nl> + DisabledOpcodeTemplates # unused class ( test / functional / data / invalid_txs . py ) <nl> _ . filename # unused attribute ( contrib / macdeploy / custom_dsstore . py ) <nl> InvalidOPIFConstruction # unused class ( test / functional / data / invalid_txs . py ) <nl> _ . is_compressed # unused property ( test / functional / test_framework / key . py ) <nl>
Merge : test : lint : Add DisabledOpcodeTemplates to whitelist
bitcoin/bitcoin
cfcaa9759e23d10a766d50558d369ad4110129f8
2019-09-18T16:27:02Z
mmm a / docs / CHANGELOG . txt <nl> ppp b / docs / CHANGELOG . txt <nl> Other Changes : <nl> BeginMenu ( ) / EndMenu ( ) or BeginPopup / EndPopup ( ) . ( # 3223 , # 1207 ) [ @ rokups ] <nl> - Drag and Drop : Fixed unintended fallback " . . . " tooltip display during drag operation when <nl> drag source uses _SourceNoPreviewTooltip flags . ( # 3160 ) [ @ rokups ] <nl> - - ImDrawList : Fixed an issue when draw command merging or cancelling while crossing the VtxOffset <nl> - boundary would lead to draw command being emitted with wrong VtxOffset value . ( # 3129 , # 3163 , # 3232 ) <nl> + - ImDrawList : Fixed an issue where draw command merging or primitive unreserve while crossing the <nl> + VtxOffset boundary would lead to draw commands with wrong VtxOffset . ( # 3129 , # 3163 , # 3232 , # 2591 ) <nl> [ @ thedmd , @ Shironekoben , @ sergeyn , @ ocornut ] <nl> + - ImDrawList , ImDrawListSplitter , Columns : Fixed an issue where starting a split when current <nl> + VtxOffset was not zero would lead to draw commands with wrong VtxOffset . ( # 2591 ) <nl> - Misc , Freetype : Fix for rare case where FT_Get_Char_Index ( ) succeed but FT_Load_Glyph ( ) fails . <nl> - CI : Added CI test to verify we ' re never accidentally dragging libstdc + + ( on some compiler setups , <nl> static constructors for non - pod data seems to drag in libstdc + + due to thread - safety concerns ) . <nl> Other Changes : <nl> - Log / Capture : Fixed BeginTabItem ( ) label not being included in a text log / capture . <nl> - ImDrawList : Added ImDrawCmd : : VtxOffset value to support large meshes ( 64k + vertices ) using 16 - bit indices . <nl> The renderer back - end needs to set ' io . BackendFlags | = ImGuiBackendFlags_RendererHasVtxOffset ' to enable <nl> - this , and honor the ImDrawCmd : : VtxOffset field . Otherwise the value will always be zero . <nl> + this , and honor the ImDrawCmd : : VtxOffset field . Otherwise the value will always be zero . ( # 2591 ) <nl> This has the advantage of preserving smaller index buffers and allowing to execute on hardware that do not <nl> support 32 - bit indices . Most examples back - ends have been modified to support the VtxOffset field . <nl> - ImDrawList : Added ImDrawCmd : : IdxOffset value , equivalent to summing element count for each draw command . <nl> - This is provided for convenience and consistency with VtxOffset . <nl> + This is provided for convenience and consistency with VtxOffset . ( # 2591 ) <nl> - ImDrawCallback : Allow to override the signature of ImDrawCallback by # define - ing it . This is meant to <nl> facilitate custom rendering back - ends passing local render - specific data to the draw callback . <nl> - ImFontAtlas : FreeType : Added RasterizerFlags : : Monochrome flag to disable font anti - aliasing . Combine <nl> Other Changes : <nl> dealing with Win32 , and to facilitate integration in custom engines . ( # 2546 ) [ @ andrewwillmott ] <nl> - Backends : OSX : imgui_impl_osx : Added mouse cursor support . ( # 2585 , # 1873 ) [ @ actboy168 ] <nl> - Examples / Backends : DirectX9 / 10 / 11 / 12 , Metal , Vulkan , OpenGL3 ( Desktop GL only ) : Added support for large meshes <nl> - ( 64k + vertices ) with 16 - bit indices , enable ' ImGuiBackendFlags_RendererHasVtxOffset ' in those back - ends . <nl> + ( 64k + vertices ) with 16 - bit indices , enable ' ImGuiBackendFlags_RendererHasVtxOffset ' in those back - ends . ( # 2591 ) <nl> - Examples / Backends : Don ' t filter characters under 0x10000 before calling io . 
AddInputCharacter ( ) , <nl> the filtering is done in io . AddInputCharacter ( ) itself . This is in prevision for fuller Unicode <nl> support . ( # 2538 , # 2541 ) <nl> mmm a / imgui_draw . cpp <nl> ppp b / imgui_draw . cpp <nl> void ImDrawListSplitter : : Split ( ImDrawList * draw_list , int channels_count ) <nl> ImDrawCmd draw_cmd ; <nl> draw_cmd . ClipRect = draw_list - > _ClipRectStack . back ( ) ; <nl> draw_cmd . TextureId = draw_list - > _TextureIdStack . back ( ) ; <nl> + draw_cmd . VtxOffset = draw_list - > _VtxCurrentOffset ; <nl> _Channels [ i ] . _CmdBuffer . push_back ( draw_cmd ) ; <nl> } <nl> } <nl>
ImDrawList , ImDrawListSplitter , Columns : Fixed an issue where starting a split when current VtxOffset was not zero would lead to draw commands with wrong VtxOffset . ( # 2591 )
ocornut/imgui
f6120f8e16eefcdb37b63974e6915a3dd35414be
2020-06-06T19:31:31Z
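The one-line fix seeds each channel's initial ImDrawCmd with _VtxCurrentOffset, so a split begun after the draw list has crossed a 64k-vertex boundary no longer resets VtxOffset to zero. A usage sketch of the splitter through the legacy ImDrawList channel API; this must be called inside an active ImGui frame and window:

#include "imgui.h"

// Draw text first, then merge a translucent backdrop behind it.
void DrawLabelWithBackdrop(const char* text) {
  ImDrawList* dl = ImGui::GetWindowDrawList();
  ImVec2 pos = ImGui::GetCursorScreenPos();
  ImVec2 size = ImGui::CalcTextSize(text);
  dl->ChannelsSplit(2);       // each channel now inherits the current VtxOffset
  dl->ChannelsSetCurrent(1);  // foreground text first
  dl->AddText(pos, IM_COL32(255, 255, 255, 255), text);
  dl->ChannelsSetCurrent(0);  // then the backdrop, merged behind it
  dl->AddRectFilled(pos, ImVec2(pos.x + size.x, pos.y + size.y),
                    IM_COL32(0, 0, 0, 128));
  dl->ChannelsMerge();
}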
mmm a / shell / shell_utils . cpp <nl> ppp b / shell / shell_utils . cpp <nl> namespace mongo { <nl> env [ 1 ] = NULL ; <nl> # endif / / HEAP_CHECKING <nl> <nl> - execvpe ( argv [ 0 ] , const_cast < char * * > ( argv ) , const_cast < char * * > ( env ) ) ; <nl> + execve ( argv [ 0 ] , const_cast < char * * > ( argv ) , const_cast < char * * > ( env ) ) ; <nl> <nl> cout < < " Unable to start program " < < argv [ 0 ] < < ' ' < < errnoWithDescription ( ) < < endl ; <nl> : : _Exit ( - 1 ) ; <nl>
Fix build ( broken at f800c20958ba03e25e85 )
mongodb/mongo
64210308ba3a00377d211ae2e3499f65b0962591
2010-08-10T17:56:55Z
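execvpe is a GNU extension, which is what broke the build; execve is POSIX but performs no PATH search, so argv[0] must resolve as written. A minimal sketch of the portable call with its error path (the environment entry is hypothetical; the real shell assembles env for heap checking):

#include <unistd.h>
#include <cerrno>
#include <cstdio>
#include <cstring>

int main() {
  const char* argv[] = {"/bin/echo", "hello", nullptr};
  const char* envp[] = {"HEAPCHECK=normal", nullptr};  // hypothetical entry
  execve(argv[0], const_cast<char**>(argv), const_cast<char**>(envp));
  // Only reached if execve failed, mirroring the shell's error path above.
  std::fprintf(stderr, "Unable to start program %s: %s\n", argv[0],
               std::strerror(errno));
  return -1;
}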
mmm a / tensorflow / contrib / cmake / tf_tests . cmake <nl> ppp b / tensorflow / contrib / cmake / tf_tests . cmake <nl> if ( tensorflow_BUILD_PYTHON_TESTS ) <nl> # Newly running on Windows since TensorBoard backend move . Fail on Windows and need debug . <nl> " $ { tensorflow_source_dir } / tensorflow / tensorboard / backend / event_processing / directory_watcher_test . py " <nl> " $ { tensorflow_source_dir } / tensorflow / tensorboard / backend / event_processing / event_multiplexer_test . py " <nl> - ) <nl> + " $ { tensorflow_source_dir } / tensorflow / contrib / data / python / kernel_tests / dataset_constructor_op_test . py " # Segfaults on Windows . <nl> + ) <nl> endif ( ) <nl> list ( REMOVE_ITEM tf_test_src_py $ { tf_test_src_py_exclude } ) <nl> <nl>
[ Input pipeline ] Disable failing test on Windows while we investigate the cause .
tensorflow/tensorflow
c37b31d5cc716516a3bbb8946d62a7df401fbc0e
2017-05-17T18:32:58Z
mmm a / modules / gdscript / gd_editor . cpp <nl> ppp b / modules / gdscript / gd_editor . cpp <nl> static Ref < Reference > _get_parent_class ( GDCompletionContext & context ) { <nl> if ( script . is_null ( ) ) { <nl> return REF ( ) ; <nl> } <nl> - if ( script - > is_valid ( ) ) { <nl> + if ( ! script - > is_valid ( ) ) { <nl> <nl> return REF ( ) ; <nl> } <nl>
Fixed typo in gdscript autocompletion .
godotengine/godot
edaf77abd614d1260c6827f25045cc0473a08117
2017-03-17T22:27:44Z
mmm a / lib / Sema / CSApply . cpp <nl> ppp b / lib / Sema / CSApply . cpp <nl> namespace { <nl> } <nl> <nl> auto subscript = cast < SubscriptDecl > ( choice . getDecl ( ) ) ; <nl> + cs . TC . requestMemberLayout ( subscript ) ; <nl> <nl> auto & tc = cs . getTypeChecker ( ) ; <nl> auto baseTy = cs . getType ( base ) - > getRValueType ( ) ; <nl>
Sema : request member layout in the subscript_expr
apple/swift
b267c1254dd858047ad1a0ba1da473e8f35d60a6
2018-01-25T23:24:16Z
mmm a / html / admin / js / views / graphView . js <nl> ppp b / html / admin / js / views / graphView . js <nl> <nl> / * jslint indent : 2 , nomen : true , maxlen : 100 , sloppy : true , vars : true , white : true , plusplus : true , forin : true * / <nl> - / * global Backbone , $ , window , EJS , GraphViewerUI * / <nl> + / * global Backbone , $ , _ , window , EJS , GraphViewerUI * / <nl> <nl> window . graphView = Backbone . View . extend ( { <nl> el : ' # content ' , <nl> window . graphView = Backbone . View . extend ( { <nl> url : " / _api / graph " , <nl> contentType : " application / json " , <nl> success : function ( data ) { <nl> - console . log ( data ) ; <nl> self . graphs = _ . pluck ( data . graphs , " _key " ) ; <nl> self . render ( ) ; <nl> } <nl> mmm a / js / actions / api - transaction . js <nl> ppp b / js / actions / api - transaction . js <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> / / / collections : { <nl> / / / write : " products " <nl> / / / } , <nl> - / / / action : " function ( ) { var db = require ( ' internal ' ) . db ; db . products . save ( { } ) ; return db . products . count ( ) ; } " <nl> + / / / action : " function ( ) { " + <nl> + / / / " var db = require ( ' internal ' ) . db ; " + <nl> + / / / " db . products . save ( { } ) ; " + <nl> + / / / " return db . products . count ( ) ; } " <nl> / / / } ; <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , body ) ; <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> / / / collections : { <nl> / / / write : [ " products " , " materials " ] <nl> / / / } , <nl> - / / / action : " function ( ) { var db = require ( ' internal ' ) . db ; db . products . save ( { } ) ; db . materials . save ( { } ) ; return ' worked ! ' ; } " <nl> + / / / action : " function ( ) { " + <nl> + / / / " var db = require ( ' internal ' ) . db ; " + <nl> + / / / " db . products . save ( { } ) ; " + <nl> + / / / " db . materials . save ( { } ) ; " + <nl> + / / / " return ' worked ! ' ; } " <nl> / / / } ; <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , body ) ; <nl> var actions = require ( " org / arangodb / actions " ) ; <nl> / / / collections : { <nl> / / / write : " products " <nl> / / / } , <nl> - / / / action : " function ( ) { var db = require ( ' internal ' ) . db ; db . products . save ( { _key : ' abc ' } ) ; db . products . save ( { _key : ' abc ' } ) ; } " <nl> + / / / action : " function ( ) { " + <nl> + / / / " var db = require ( ' internal ' ) . db ; " + <nl> + / / / " db . products . save ( { _key : ' abc ' } ) ; " + <nl> + / / / " db . products . save ( { _key : ' abc ' } ) ; } " <nl> / / / } ; <nl> / / / <nl> / / / var response = logCurlRequest ( ' POST ' , url , body ) ; <nl>
fixed jslint warnings
arangodb/arangodb
227e6546c08f89e854b88f601a8c5a3207273479
2013-07-25T14:35:21Z
mmm a / include / swift / AST / Decl . h <nl> ppp b / include / swift / AST / Decl . h <nl> class PatternBindingEntry { <nl> } <nl> SourceRange getOrigInitRange ( ) const ; <nl> void setInit ( Expr * E ) ; <nl> + <nl> + / / / Retrieve the initializer as it was written in the source . <nl> + Expr * getInitAsWritten ( ) const { return InitCheckedAndRemoved . getPointer ( ) ; } <nl> + <nl> bool isInitializerChecked ( ) const { <nl> return InitCheckedAndRemoved . getInt ( ) . contains ( Flags : : Checked ) ; <nl> } <nl> mmm a / include / swift / AST / LazyResolver . h <nl> ppp b / include / swift / AST / LazyResolver . h <nl> class ProtocolDecl ; <nl> class Substitution ; <nl> class TypeDecl ; <nl> class ValueDecl ; <nl> + class VarDecl ; <nl> <nl> / / / Abstract interface used to lazily resolve aspects of the AST , such as the <nl> / / / types of declarations or protocol conformance structures . <nl> class LazyResolver { <nl> / / / Bind an extension to its extended type . <nl> virtual void bindExtension ( ExtensionDecl * ext ) = 0 ; <nl> <nl> + / / / Introduce the accessors for a ' lazy ' variable . <nl> + virtual void introduceLazyVarAccessors ( VarDecl * var ) = 0 ; <nl> + <nl> / / / Resolve the type of an extension . <nl> / / / <nl> / / / This can be called to ensure that the members of an extension can be <nl> class DelegatingLazyResolver : public LazyResolver { <nl> Principal . bindExtension ( ext ) ; <nl> } <nl> <nl> + void introduceLazyVarAccessors ( VarDecl * var ) override { <nl> + Principal . introduceLazyVarAccessors ( var ) ; <nl> + } <nl> + <nl> void resolveExtension ( ExtensionDecl * ext ) override { <nl> Principal . resolveExtension ( ext ) ; <nl> } <nl> mmm a / lib / AST / ASTScope . cpp <nl> ppp b / lib / AST / ASTScope . cpp <nl> void ASTScope : : expand ( ) const { <nl> <nl> / / Create a child for the initializer , if present . <nl> ASTScope * initChild = nullptr ; <nl> - if ( patternEntry . getInit ( ) & & <nl> - patternEntry . getInit ( ) - > getSourceRange ( ) . isValid ( ) ) { <nl> + if ( patternEntry . getInitAsWritten ( ) & & <nl> + patternEntry . getInitAsWritten ( ) - > getSourceRange ( ) . isValid ( ) ) { <nl> initChild = new ( ctx ) ASTScope ( ASTScopeKind : : PatternInitializer , this , <nl> patternBinding . decl , patternBinding . entry ) ; <nl> } <nl> void ASTScope : : expand ( ) const { <nl> / / normal case ) , add teh initializer child first . <nl> if ( initChild & & <nl> ctx . SourceMgr . isBeforeInBuffer ( <nl> - patternBinding . decl - > getInit ( patternBinding . entry ) - > getEndLoc ( ) , <nl> + patternEntry . getInitAsWritten ( ) - > getEndLoc ( ) , <nl> var - > getBracesRange ( ) . Start ) ) { <nl> addChild ( initChild ) ; <nl> initChild = nullptr ; <nl> void ASTScope : : expand ( ) const { <nl> break ; <nl> } <nl> <nl> - case ASTScopeKind : : PatternInitializer : <nl> + case ASTScopeKind : : PatternInitializer : { <nl> + const auto & patternEntry = <nl> + patternBinding . decl - > getPatternList ( ) [ patternBinding . entry ] ; <nl> + <nl> / / Create a child for the initializer expression . <nl> - if ( auto child = <nl> - createIfNeeded ( this , <nl> - patternBinding . decl - > getInit ( patternBinding . entry ) ) ) <nl> + if ( auto child = createIfNeeded ( this , patternEntry . getInitAsWritten ( ) ) ) <nl> addChild ( child ) ; <nl> break ; <nl> + } <nl> <nl> case ASTScopeKind : : AfterPatternBinding : { <nl> / / Create a child for the next pattern binding . 
<nl> SourceRange ASTScope : : getSourceRangeImpl ( ) const { <nl> return range ; <nl> } <nl> <nl> - case ASTScopeKind : : PatternInitializer : <nl> - return patternBinding . decl - > getInit ( patternBinding . entry ) - > getSourceRange ( ) ; <nl> + case ASTScopeKind : : PatternInitializer : { <nl> + const auto & patternEntry = <nl> + patternBinding . decl - > getPatternList ( ) [ patternBinding . entry ] ; <nl> + <nl> + return patternEntry . getInitAsWritten ( ) - > getSourceRange ( ) ; <nl> + } <nl> <nl> case ASTScopeKind : : AfterPatternBinding : { <nl> const auto & patternEntry = <nl> SmallVector < ValueDecl * , 4 > ASTScope : : getLocalBindings ( ) const { <nl> case ASTScopeKind : : DefaultArgument : <nl> case ASTScopeKind : : AbstractFunctionBody : <nl> case ASTScopeKind : : PatternBinding : <nl> - case ASTScopeKind : : PatternInitializer : <nl> case ASTScopeKind : : BraceStmt : <nl> case ASTScopeKind : : IfStmt : <nl> case ASTScopeKind : : GuardStmt : <nl> SmallVector < ValueDecl * , 4 > ASTScope : : getLocalBindings ( ) const { <nl> result . push_back ( cast < ValueDecl > ( decl ) ) ; <nl> break ; <nl> <nl> + case ASTScopeKind : : PatternInitializer : <nl> + / / ' self ' is available within the pattern initializer of a ' lazy ' variable . <nl> + if ( auto singleVar = patternBinding . decl - > getSingleVar ( ) ) { <nl> + if ( singleVar - > getAttrs ( ) . hasAttribute < LazyAttr > ( ) & & <nl> + singleVar - > getDeclContext ( ) - > isTypeContext ( ) ) { <nl> + / / If there is no getter ( yet ) , add them . <nl> + if ( ! singleVar - > getGetter ( ) ) { <nl> + ASTContext & ctx = singleVar - > getASTContext ( ) ; <nl> + if ( auto resolver = ctx . getLazyResolver ( ) ) <nl> + resolver - > introduceLazyVarAccessors ( singleVar ) ; <nl> + } <nl> + <nl> + / / Add the getter ' s ' self ' . <nl> + if ( auto getter = singleVar - > getGetter ( ) ) <nl> + if ( auto self = getter - > getImplicitSelfDecl ( ) ) <nl> + result . push_back ( self ) ; <nl> + } <nl> + } <nl> + break ; <nl> + <nl> case ASTScopeKind : : Closure : <nl> / / Note : Parameters all at once is different from functions , but it ' s not <nl> / / relevant because there are no default arguments . <nl> mmm a / lib / AST / NameLookup . cpp <nl> ppp b / lib / AST / NameLookup . cpp <nl> static DeclVisibilityKind getLocalDeclVisibilityKind ( const ASTScope * scope ) { <nl> case ASTScopeKind : : AbstractFunctionBody : <nl> case ASTScopeKind : : DefaultArgument : <nl> case ASTScopeKind : : PatternBinding : <nl> - case ASTScopeKind : : PatternInitializer : <nl> case ASTScopeKind : : BraceStmt : <nl> case ASTScopeKind : : IfStmt : <nl> case ASTScopeKind : : GuardStmt : <nl> static DeclVisibilityKind getLocalDeclVisibilityKind ( const ASTScope * scope ) { <nl> <nl> case ASTScopeKind : : AbstractFunctionParams : <nl> case ASTScopeKind : : Closure : <nl> + case ASTScopeKind : : PatternInitializer : / / lazy var ' self ' <nl> return DeclVisibilityKind : : FunctionParameter ; <nl> <nl> case ASTScopeKind : : AfterPatternBinding : <nl> UnqualifiedLookup : : UnqualifiedLookup ( DeclName Name , DeclContext * DC , <nl> for ( auto currentScope = lookupScope ; currentScope ; <nl> currentScope = currentScope - > getParent ( ) ) { <nl> / / Perform local lookup within this scope . <nl> - for ( auto local : currentScope - > getLocalBindings ( ) ) { <nl> + auto localBindings = currentScope - > getLocalBindings ( ) ; <nl> + for ( auto local : localBindings ) { <nl> Consumer . 
foundDecl ( local , <nl> getLocalDeclVisibilityKind ( currentScope ) ) ; <nl> } <nl> UnqualifiedLookup : : UnqualifiedLookup ( DeclName Name , DeclContext * DC , <nl> / / Pattern binding initializers are only interesting insofar as they <nl> / / affect lookup in an enclosing nominal type or extension thereof . <nl> if ( auto * bindingInit = dyn_cast < PatternBindingInitializer > ( dc ) ) { <nl> - if ( auto binding = bindingInit - > getBinding ( ) ) <nl> + if ( auto binding = bindingInit - > getBinding ( ) ) { <nl> lookupInNominalIsStatic = binding - > isStatic ( ) ; <nl> <nl> - / / FIXME : Look for ' self ' for a lazy variable initializer . <nl> + / / Look for ' self ' for a lazy variable initializer . <nl> + if ( auto singleVar = binding - > getSingleVar ( ) ) <nl> + / / We only care about lazy variables . <nl> + if ( singleVar - > getAttrs ( ) . hasAttribute < LazyAttr > ( ) ) { <nl> + <nl> + / / ' self ' will be listed in the local bindings . <nl> + for ( auto local : localBindings ) { <nl> + auto param = dyn_cast < ParamDecl > ( local ) ; <nl> + if ( ! param ) continue ; <nl> + <nl> + <nl> + / / If we have a variable that ' s the implicit self of its enclosing <nl> + / / context , mark it as ' self ' . <nl> + if ( auto func = dyn_cast < FuncDecl > ( param - > getDeclContext ( ) ) ) { <nl> + if ( param = = func - > getImplicitSelfDecl ( ) ) { <nl> + selfDecl = param ; <nl> + break ; <nl> + } <nl> + } <nl> + } <nl> + } <nl> + } <nl> continue ; <nl> } <nl> <nl> mmm a / lib / Sema / CodeSynthesis . cpp <nl> ppp b / lib / Sema / CodeSynthesis . cpp <nl> void swift : : maybeAddMaterializeForSet ( AbstractStorageDecl * storage , <nl> addMaterializeForSet ( storage , TC ) ; <nl> } <nl> <nl> + void TypeChecker : : introduceLazyVarAccessors ( VarDecl * var ) { <nl> + maybeAddAccessorsToVariable ( var , * this ) ; <nl> + } <nl> + <nl> void swift : : maybeAddAccessorsToVariable ( VarDecl * var , TypeChecker & TC ) { <nl> / / If we ' ve already synthesized accessors or are currently in the process <nl> / / of doing so , don ' t proceed . <nl> mmm a / lib / Sema / TypeChecker . h <nl> ppp b / lib / Sema / TypeChecker . h <nl> class TypeChecker final : public LazyResolver { <nl> handleExternalDecl ( nominal ) ; <nl> } <nl> <nl> + / / / Introduce the accessors for a ' lazy ' variable . <nl> + void introduceLazyVarAccessors ( VarDecl * var ) override ; <nl> + <nl> / / / Infer default value witnesses for all requirements in the given protocol . <nl> void inferDefaultWitnesses ( ProtocolDecl * proto ) ; <nl> <nl> mmm a / test / NameBinding / scope_map . swift <nl> ppp b / test / NameBinding / scope_map . swift <nl> func localPatternsWithSharedType ( ) { <nl> let i , j , k : Int <nl> } <nl> <nl> + class LazyProperties { <nl> + var value : Int = 17 <nl> + <nl> + lazy var prop : Int = self . value <nl> + } <nl> + <nl> / / RUN : not % target - swift - frontend - dump - scope - maps expanded % s 2 > % t . expanded <nl> / / RUN : % FileCheck - check - prefix CHECK - EXPANDED % s < % t . expanded <nl> <nl> func localPatternsWithSharedType ( ) { <nl> / / CHECK - EXPANDED - NEXT : ` - AbstractFunctionParams { { . * } } _ param 1 : 0 [ 183 : 39 - 183 : 39 ] expanded <nl> / / CHECK - EXPANDED - NEXT : ` - AbstractFunctionParams { { . * } } _ param 1 : 1 [ 183 : 39 - 183 : 39 ] expanded <nl> <nl> - / / CHECK - EXPANDED : ` - AbstractFunctionDecl { { . * } } localPatternsWithSharedType ( ) [ 186 : 1 - 188 : 1 ] expanded <nl> + / / CHECK - EXPANDED : - AbstractFunctionDecl { { . 
* } } localPatternsWithSharedType ( ) [ 186 : 1 - 188 : 1 ] expanded <nl> / / CHECK - EXPANDED : ` - BraceStmt { { . * } } [ 186 : 36 - 188 : 1 ] expanded <nl> / / CHECK - EXPANDED - NEXT : ` - PatternBinding { { . * } } entry 0 [ 187 : 7 - 188 : 1 ] expanded <nl> / / CHECK - EXPANDED - NEXT : ` - AfterPatternBinding { { . * } } entry 0 [ 187 : 7 - 188 : 1 ] expanded <nl> func localPatternsWithSharedType ( ) { <nl> / / CHECK - EXPANDED - NEXT : ` - PatternBinding { { . * } } entry 2 [ 187 : 13 - 188 : 1 ] expanded <nl> / / CHECK - EXPANDED - NEXT : ` - AfterPatternBinding { { . * } } entry 2 [ 187 : 16 - 188 : 1 ] expanded <nl> <nl> - / / RUN : not % target - swift - frontend - dump - scope - maps 70 : 8 , 26 : 20 , 5 : 18 , 166 : 32 , 179 : 18 % s 2 > % t . searches <nl> + / / RUN : not % target - swift - frontend - dump - scope - maps 70 : 8 , 26 : 20 , 5 : 18 , 166 : 32 , 179 : 18 , 193 : 26 % s 2 > % t . searches <nl> / / RUN : % FileCheck - check - prefix CHECK - SEARCHES % s < % t . searches <nl> <nl> / / CHECK - SEARCHES - LABEL : * * * Scope at 70 : 8 * * * <nl> func localPatternsWithSharedType ( ) { <nl> / / CHECK - SEARCHES - NEXT : { { . * } } StructDecl name = PatternInitializers <nl> / / CHECK - SEARCHES - NEXT : { { . * } } Initializer PatternBinding { { . * } } # 1 <nl> <nl> + / / CHECK - SEARCHES - LABEL : * * * Scope at 193 : 26 * * * <nl> + / / CHECK - SEARCHES - NEXT : PatternInitializer { { . * } } entry 0 [ 193 : 24 - 193 : 29 ] expanded <nl> + / / CHECK - SEARCHES - NEXT : name = scope_map <nl> + / / CHECK - SEARCHES - NEXT : FileUnit file = " { { . * } } scope_map . swift " <nl> + / / CHECK - SEARCHES - NEXT : ClassDecl name = LazyProperties <nl> + / / CHECK - SEARCHES - NEXT : Initializer PatternBinding { { . * } } # 0 <nl> + / / CHECK - SEARCHES - NEXT : Local bindings : self <nl> + <nl> / / CHECK - SEARCHES - LABEL : * * * Complete scope map * * * <nl> / / CHECK - SEARCHES - NEXT : SourceFile { { . * } } ' { { . * } } scope_map . swift ' [ 1 : 1 - { { . * } } : 1 ] expanded <nl> / / CHECK - SEARCHES : TypeOrExtensionBody { { . * } } ' S0 ' [ 4 : 11 - 6 : 1 ] expanded <nl> func localPatternsWithSharedType ( ) { <nl> / / CHECK - SEARCHES : ` - PatternBinding { { . * } } entry 1 [ 179 : 7 - 179 : 25 ] expanded <nl> / / CHECK - SEARCHES : ` - PatternInitializer { { . * } } entry 1 [ 179 : 16 - 179 : 25 ] expanded <nl> / / CHECK - SEARCHES - NOT : { { expanded } } <nl> + / / CHECK - SEARCHES : - TypeOrExtensionBody { { . * } } ' LazyProperties ' [ 190 : 22 - 194 : 1 ] expanded <nl> + / / CHECK - SEARCHES - NEXT : | - PatternBinding { { . * } } entry 0 [ 191 : 7 - 191 : 20 ] unexpanded <nl> + / / CHECK - SEARCHES - NEXT : ` - PatternBinding { { . * } } entry 0 [ 193 : 12 - 193 : 29 ] expanded <nl> + / / CHECK - SEARCHES - NEXT : | - Accessors { { . * } } scope_map . ( file ) . LazyProperties . prop @ { { . * } } scope_map . swift : 193 : 12 [ 193 : 12 - 193 : 12 ] unexpanded <nl> + / / CHECK - SEARCHES - NEXT : ` - PatternInitializer { { . * } } entry 0 [ 193 : 24 - 193 : 29 ] expanded <nl> + / / CHECK - SEARCHES - NOT : { { expanded } } <nl> mmm a / test / NameBinding / scope_map_lookup . swift <nl> ppp b / test / NameBinding / scope_map_lookup . swift <nl> class LazyProperties { <nl> localvar + = 1 <nl> _ = localvar <nl> } <nl> + <nl> + var value : Int = 17 <nl> + <nl> + lazy var prop1 : Int = value <nl> + <nl> + lazy var prop2 : Int = { value + 1 } ( ) <nl> + <nl> + lazy var prop3 : Int = { [ weak self ] in self . 
value + 1 } ( ) <nl> + <nl> + lazy var prop4 : Int = self . value <nl> + <nl> + lazy var prop5 : Int = { self . value + 1 } ( ) <nl> } <nl>
[ Name lookup ] Support lookup of ' self ' in lazy property initializers .
apple/swift
67bf68ae70bbd39078de46857ec89c33cb6efad2
2016-09-08T18:24:03Z
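
For readers following the change above: Swift's 'lazy' properties are backed by a synthesized getter, which is why 'self' becomes visible inside the initializer once the accessors are introduced. A rough analogue in C++ — a sketch only, since Swift synthesizes this automatically — is a member computed on first access, whose initializer may refer to the enclosing object ('this' playing the role of 'self'):

#include <iostream>
#include <optional>

class LazyProperties {
  int value = 17;
  std::optional<int> prop_;       // unset until first access

 public:
  int prop() {
    if (!prop_) prop_ = value;    // the initializer may use members via 'this'
    return *prop_;
  }
};

int main() {
  LazyProperties p;
  std::cout << p.prop() << "\n";  // prints 17, computed on first access
  return 0;
}
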
mmm a / doc / environment_variables . md <nl> ppp b / doc / environment_variables . md <nl> gRPC environment variables <nl> gRPC C core based implementations ( those contained in this repository ) expose <nl> some configuration as environment variables that can be set . <nl> <nl> + * http_proxy <nl> + The URI of the proxy to use for HTTP CONNECT support . Does not currently <nl> + support username or password information in the URI . <nl> + <nl> * GRPC_ABORT_ON_LEAKS <nl> A debugging aid to cause a call to abort ( ) when gRPC objects are leaked past <nl> grpc_shutdown ( ) . Set to 1 to cause the abort , if unset or 0 it does not <nl>
Merge pull request from markdroth / http_connect_doc
grpc/grpc
3c3838050eaac83003c39f9228c4f8e2679d645d
2017-06-16T14:31:59Z
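
The http_proxy variable documented above is read from the process environment. As a minimal sketch of how a client could pick it up — not gRPC's actual internals; the parse_scheme helper here is hypothetical, added for illustration:

#include <cstdlib>
#include <iostream>
#include <string>

// Hypothetical helper: return the part before "://", or "" if absent.
static std::string parse_scheme(const std::string& uri) {
  auto pos = uri.find("://");
  return pos == std::string::npos ? std::string() : uri.substr(0, pos);
}

int main() {
  const char* proxy = std::getenv("http_proxy");
  if (proxy == nullptr) {
    std::cout << "no proxy configured\n";
    return 0;
  }
  std::cout << "proxy URI: " << proxy
            << " (scheme: " << parse_scheme(proxy) << ")\n";
  return 0;
}
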
new file mode 100644 <nl> index 0000000000 . . 65f0c411fc <nl> mmm / dev / null <nl> ppp b / code / dynamic_programming / largest_sum_contiguous_subarray / maxSubArraySum . py <nl> <nl> + # Function to find the maximum contiguous subarray sum using Kadane ' s algorithm <nl> + def maxSubArraySum ( a , size ) : <nl> + <nl> + max_so_far = - 9223372036854775806 # negative sys . maxint <nl> + max_ending_here = 0 <nl> + <nl> + for i in range ( 0 , size ) : <nl> + max_ending_here = max_ending_here + a [ i ] <nl> + if ( max_so_far < max_ending_here ) : <nl> + max_so_far = max_ending_here <nl> + <nl> + if max_ending_here < 0 : <nl> + max_ending_here = 0 <nl> + return max_so_far <nl> + <nl> + # Enter the numbers separated by spaces <nl> + arr = [ int ( x ) for x in input ( ) . split ( ) ] <nl> + <nl> + print ( " Maximum contiguous sum is " , maxSubArraySum ( arr , len ( arr ) ) ) <nl>
Create maxSubArraySum . py
OpenGenus/cosmos
e4938138395971ab089169067e523a39f3023f3a
2017-10-04T17:31:07Z
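
For comparison with the Python file added above, a C++ sketch of the same Kadane scan; the names mirror the Python version and the sample input is illustrative:

#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

long long maxSubArraySum(const std::vector<long long>& a) {
  long long max_so_far = std::numeric_limits<long long>::min();
  long long max_ending_here = 0;
  for (long long x : a) {
    max_ending_here += x;                              // extend the running subarray
    max_so_far = std::max(max_so_far, max_ending_here);
    if (max_ending_here < 0)                           // a negative prefix never helps; restart
      max_ending_here = 0;
  }
  return max_so_far;
}

int main() {
  std::vector<long long> arr{-2, 1, -3, 4, -1, 2, 1, -5, 4};
  std::cout << "Maximum contiguous sum is " << maxSubArraySum(arr) << "\n";
  return 0;  // prints 6 for this sample input
}
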
mmm a / xbmc / filesystem / win32 / Win32File . cpp <nl> ppp b / xbmc / filesystem / win32 / Win32File . cpp <nl> ssize_t CWin32File : : Read ( void * lpBuf , size_t uiBufSize ) <nl> if ( ! ReadFile ( m_hFile , dummyBuf . get ( ) , 0 , & bytesRead , NULL ) ) <nl> return - 1 ; <nl> <nl> - assert ( bytesRead ! = 0 ) ; <nl> + assert ( bytesRead = = 0 ) ; <nl> return 0 ; <nl> } <nl> <nl>
[ Fix ] Reading zero bytes from a file should yield zero read bytes .
xbmc/xbmc
aecdcd66f4be769b54fd898f415d7f3813db9f50
2014-11-17T14:45:58Z
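
The fix above corrects an inverted assertion: a read of zero requested bytes must report zero bytes read. The same contract, sketched with portable C stdio rather than the Win32 ReadFile API (the file name is hypothetical):

#include <cassert>
#include <cstdio>

int main() {
  std::FILE* f = std::fopen("example.txt", "wb+");   // hypothetical file name
  if (f == nullptr) return 1;
  char buf[1];
  std::size_t bytesRead = std::fread(buf, 1, 0, f);  // request zero bytes
  assert(bytesRead == 0);  // the corrected invariant: zero requested, zero read
  std::fclose(f);
  return 0;
}
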
mmm a / samples / Cpp / TestCpp / Classes / PerformanceTest / PerformanceNodeChildrenTest . cpp <nl> ppp b / samples / Cpp / TestCpp / Classes / PerformanceTest / PerformanceNodeChildrenTest . cpp <nl> void AddSpriteSheet : : update ( float dt ) <nl> { <nl> batchNode - > addChild ( sprites [ i ] , zs [ i ] , kTagBase + i ) ; <nl> } <nl> - <nl> - batchNode - > sortAllChildren ( ) ; <nl> - <nl> CC_PROFILER_STOP ( this - > profilerName ( ) ) ; <nl> <nl> + <nl> + batchNode - > sortAllChildren ( ) ; <nl> + <nl> / / remove them <nl> for ( int i = 0 ; i < totalToAdd ; i + + ) <nl> { <nl>
AddChild test fix
cocos2d/cocos2d-x
5abb57e5f21069672ee8f1d8374dceea1761eb44
2013-08-31T13:59:44Z
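
The commit above moves sortAllChildren() past CC_PROFILER_STOP so the sort is no longer charged to the addChild measurement. A minimal timer sketch — plain chrono, not cocos2d's profiler macros — showing why placement relative to start/stop matters:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
  using clock = std::chrono::steady_clock;

  auto start = clock::now();
  std::this_thread::sleep_for(std::chrono::milliseconds(10));  // work under test
  auto stop = clock::now();  // stop BEFORE unrelated work, as in the fix

  std::this_thread::sleep_for(std::chrono::milliseconds(50));  // e.g. the sort

  auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
  std::cout << "measured: " << ms.count() << " ms\n";  // ~10 ms, not ~60 ms
  return 0;
}
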
mmm a / dlib / cmake <nl> ppp b / dlib / cmake <nl> <nl> + # This is a CMake file meant to be included via include ( ) <nl> <nl> cmake_minimum_required ( VERSION 2 . 6 . 4 ) <nl> <nl> if ( POLICY CMP0054 ) <nl> cmake_policy ( SET CMP0054 NEW ) <nl> endif ( ) <nl> <nl> - # Don ' t add dlib if it ' s already been added to the cmake project <nl> - if ( NOT TARGET dlib ) <nl> <nl> - # Determine the path to dlib . <nl> - string ( REGEX REPLACE " cmake $ " " " dlib_path $ { CMAKE_CURRENT_LIST_FILE } ) <nl> + # Determine the path to dlib . <nl> + string ( REGEX REPLACE " cmake $ " " " dlib_path $ { CMAKE_CURRENT_LIST_FILE } ) <nl> <nl> - if ( CMAKE_COMPILER_IS_GNUCXX ) <nl> - # By default , g + + won ' t warn or error if you forget to return a value in a <nl> - # function which requires you to do so . This option makes it give a warning <nl> - # for doing this . <nl> - add_definitions ( " - Wreturn - type " ) <nl> - endif ( ) <nl> + if ( CMAKE_COMPILER_IS_GNUCXX ) <nl> + # By default , g + + won ' t warn or error if you forget to return a value in a <nl> + # function which requires you to do so . This option makes it give a warning <nl> + # for doing this . <nl> + add_definitions ( " - Wreturn - type " ) <nl> + endif ( ) <nl> <nl> - # Setup some options to allow a user to enable SSE and AVX instruction use . <nl> - if ( CMAKE_COMPILER_IS_GNUCXX OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " Clang " <nl> - OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " GNU " <nl> - OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " Intel " ) <nl> - option ( USE_SSE2_INSTRUCTIONS " Compile your program with SSE2 instructions " OFF ) <nl> + # Setup some options to allow a user to enable SSE and AVX instruction use . <nl> + if ( CMAKE_COMPILER_IS_GNUCXX OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " Clang " <nl> + OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " GNU " <nl> + OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " Intel " ) <nl> + option ( USE_SSE2_INSTRUCTIONS " Compile your program with SSE2 instructions " OFF ) <nl> + option ( USE_SSE4_INSTRUCTIONS " Compile your program with SSE4 instructions " OFF ) <nl> + option ( USE_AVX_INSTRUCTIONS " Compile your program with AVX instructions " OFF ) <nl> + if ( USE_AVX_INSTRUCTIONS ) <nl> + add_definitions ( - mavx ) <nl> + elseif ( USE_SSE4_INSTRUCTIONS ) <nl> + add_definitions ( - msse4 ) <nl> + elseif ( USE_SSE2_INSTRUCTIONS ) <nl> + add_definitions ( - msse2 ) <nl> + endif ( ) <nl> + elseif ( MSVC OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " MSVC " ) # else if using Visual Studio <nl> + # Use SSE2 by default when using Visual Studio . 
<nl> + option ( USE_SSE2_INSTRUCTIONS " Compile your program with SSE2 instructions " ON ) <nl> + # Visual Studio 2005 didn ' t support SSE4 <nl> + if ( NOT MSVC80 ) <nl> option ( USE_SSE4_INSTRUCTIONS " Compile your program with SSE4 instructions " OFF ) <nl> + endif ( ) <nl> + # Visual Studio 2005 and 2008 didn ' t support AVX <nl> + if ( NOT MSVC80 AND NOT MSVC90 ) <nl> option ( USE_AVX_INSTRUCTIONS " Compile your program with AVX instructions " OFF ) <nl> - if ( USE_AVX_INSTRUCTIONS ) <nl> - add_definitions ( - mavx ) <nl> - elseif ( USE_SSE4_INSTRUCTIONS ) <nl> - add_definitions ( - msse4 ) <nl> - elseif ( USE_SSE2_INSTRUCTIONS ) <nl> - add_definitions ( - msse2 ) <nl> + endif ( ) <nl> + include ( CheckTypeSize ) <nl> + check_type_size ( " void * " SIZE_OF_VOID_PTR ) <nl> + if ( USE_AVX_INSTRUCTIONS ) <nl> + add_definitions ( / arch : AVX ) <nl> + elseif ( USE_SSE4_INSTRUCTIONS ) <nl> + # Visual studio doesn ' t have an / arch : SSE2 flag when building in 64 bit modes . <nl> + # So only give it when we are doing a 32 bit build . <nl> + if ( SIZE_OF_VOID_PTR EQUAL 4 ) <nl> + add_definitions ( / arch : SSE2 ) <nl> endif ( ) <nl> - elseif ( MSVC OR " $ { CMAKE_CXX_COMPILER_ID } " STREQUAL " MSVC " ) # else if using Visual Studio <nl> - # Use SSE2 by default when using Visual Studio . <nl> - option ( USE_SSE2_INSTRUCTIONS " Compile your program with SSE2 instructions " ON ) <nl> - # Visual Studio 2005 didn ' t support SSE4 <nl> - if ( NOT MSVC80 ) <nl> - option ( USE_SSE4_INSTRUCTIONS " Compile your program with SSE4 instructions " OFF ) <nl> - endif ( ) <nl> - # Visual Studio 2005 and 2008 didn ' t support AVX <nl> - if ( NOT MSVC80 AND NOT MSVC90 ) <nl> - option ( USE_AVX_INSTRUCTIONS " Compile your program with AVX instructions " OFF ) <nl> - endif ( ) <nl> - include ( CheckTypeSize ) <nl> - check_type_size ( " void * " SIZE_OF_VOID_PTR ) <nl> - if ( USE_AVX_INSTRUCTIONS ) <nl> - add_definitions ( / arch : AVX ) <nl> - elseif ( USE_SSE4_INSTRUCTIONS ) <nl> - # Visual studio doesn ' t have an / arch : SSE2 flag when building in 64 bit modes . <nl> - # So only give it when we are doing a 32 bit build . <nl> - if ( SIZE_OF_VOID_PTR EQUAL 4 ) <nl> - add_definitions ( / arch : SSE2 ) <nl> - endif ( ) <nl> - add_definitions ( - DDLIB_HAVE_SSE2 ) <nl> - add_definitions ( - DDLIB_HAVE_SSE3 ) <nl> - add_definitions ( - DDLIB_HAVE_SSE41 ) <nl> - elseif ( USE_SSE2_INSTRUCTIONS ) <nl> - # Visual studio doesn ' t have an / arch : SSE2 flag when building in 64 bit modes . <nl> - # So only give it when we are doing a 32 bit build . <nl> - if ( SIZE_OF_VOID_PTR EQUAL 4 ) <nl> - add_definitions ( / arch : SSE2 ) <nl> - endif ( ) <nl> - add_definitions ( - DDLIB_HAVE_SSE2 ) <nl> + add_definitions ( - DDLIB_HAVE_SSE2 ) <nl> + add_definitions ( - DDLIB_HAVE_SSE3 ) <nl> + add_definitions ( - DDLIB_HAVE_SSE41 ) <nl> + elseif ( USE_SSE2_INSTRUCTIONS ) <nl> + # Visual studio doesn ' t have an / arch : SSE2 flag when building in 64 bit modes . <nl> + # So only give it when we are doing a 32 bit build . <nl> + if ( SIZE_OF_VOID_PTR EQUAL 4 ) <nl> + add_definitions ( / arch : SSE2 ) <nl> endif ( ) <nl> + add_definitions ( - DDLIB_HAVE_SSE2 ) <nl> endif ( ) <nl> + endif ( ) <nl> + <nl> <nl> + # Add folder containing dlib to the include search path . <nl> + INCLUDE_DIRECTORIES ( $ { dlib_path } / . . ) <nl> <nl> - # Add folder containing dlib to the include search path . <nl> - INCLUDE_DIRECTORIES ( $ { dlib_path } / . . ) <nl> + # This is really optional , but nice . 
It will make sure the build mode <nl> + # created by cmake is always release by default . <nl> + include ( $ { dlib_path } / release_build_by_default ) <nl> <nl> - # This is really optional , but nice . It will make sure the build mode <nl> - # created by cmake is always release by default . <nl> - include ( $ { dlib_path } / release_build_by_default ) <nl> <nl> + # Don ' t add dlib if it ' s already been added to the cmake project <nl> + if ( NOT TARGET dlib ) <nl> add_subdirectory ( $ { dlib_path } dlib_build ) <nl> endif ( ) <nl> <nl>
Made cmake script work a little better in projects that involve a lot of
davisking/dlib
cd64c518009cb52c62a39439292202ec796ec486
2015-02-16T22:14:34Z
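
The CMake options above map to compiler flags (-msse2/-msse4/-mavx on GCC, Clang, and Intel; /arch:SSE2 and /arch:AVX on MSVC), which in turn define the compilers' usual predefined macros. A small check program — assuming the standard macro names documented by GCC/Clang and MSVC — makes the effect of those options visible:

#include <iostream>

int main() {
#if defined(__AVX__)
  std::cout << "AVX enabled\n";
#endif
#if defined(__SSE4_1__)
  std::cout << "SSE4.1 enabled\n";
#endif
#if defined(__SSE2__) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) || defined(_M_X64)
  std::cout << "SSE2 enabled\n";
#endif
  return 0;
}
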
mmm a / hphp / runtime / vm / jit / vasm - arm . cpp <nl> ppp b / hphp / runtime / vm / jit / vasm - arm . cpp <nl> void Vgen : : emit ( const mcprep & i ) { <nl> / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / <nl> <nl> void Vgen : : emit ( const call & i ) { <nl> - / / If target can be addressed by pc relative offset ( signed 26 bits ) , emit <nl> - / / PC relative Branch and Link . Else , emit target address into code and load <nl> - / / from there . <nl> - auto diff = ( i . target - a - > frontier ( ) ) > > vixl : : kInstructionSizeLog2 ; <nl> - if ( vixl : : is_int26 ( diff ) ) { <nl> - recordAddressImmediate ( ) ; <nl> - a - > bl ( diff ) ; <nl> - } else { <nl> - recordAddressImmediate ( ) ; <nl> - a - > Mov ( rAsm , i . target ) ; <nl> - a - > Blr ( rAsm ) ; <nl> - } <nl> + recordAddressImmediate ( ) ; <nl> + a - > Mov ( rAsm , i . target ) ; <nl> + a - > Blr ( rAsm ) ; <nl> if ( i . watch ) { <nl> * i . watch = a - > frontier ( ) ; <nl> env . meta . watchpoints . push_back ( i . watch ) ; <nl>
Remove unnecessary optimization when lowering call vasm instructions on ARM
facebook/hhvm
55d1b46f5b35bddd20b0193b83b95121e3033310
2018-09-13T15:12:23Z
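
The fast path removed above emitted a PC-relative bl only when the instruction offset fit AArch64's signed 26-bit branch immediate (the vixl::is_int26 test). A standalone sketch of that range check; fits_signed_bits is written here for illustration and mirrors what is_int26 verifies:

#include <cstdint>
#include <iostream>

bool fits_signed_bits(int64_t value, int bits) {
  const int64_t lo = -(int64_t{1} << (bits - 1));     // e.g. -2^25
  const int64_t hi = (int64_t{1} << (bits - 1)) - 1;  // e.g.  2^25 - 1
  return value >= lo && value <= hi;
}

int main() {
  // Byte distance between call site and target, scaled to instructions
  // (AArch64 instructions are 4 bytes, hence the shift by 2).
  int64_t byte_diff = 128 * 1024 * 1024;  // 128 MiB: just beyond BL's reach
  int64_t insn_diff = byte_diff >> 2;
  std::cout << std::boolalpha
            << fits_signed_bits(insn_diff, 26) << "\n";  // prints false
  return 0;
}
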
mmm a / table / plain_table_index . cc <nl> ppp b / table / plain_table_index . cc <nl> Slice PlainTableIndexBuilder : : FillIndexes ( <nl> } <nl> assert ( sub_index_offset = = sub_index_size_ ) ; <nl> <nl> - Log ( InfoLogLevel : : DEBUG_INFO , ioptions_ . info_log , <nl> + Log ( InfoLogLevel : : DEBUG_LEVEL , ioptions_ . info_log , <nl> " hash table size : % d , suffix_map length % zu " , <nl> index_size_ , sub_index_size_ ) ; <nl> return Slice ( allocated , GetTotalSize ( ) ) ; <nl>
Fix compile error in table / plain_table_index . cc
facebook/rocksdb
fd95745a59f5fc6e0e76b1395314097162486f7b
2014-10-30T00:42:38Z
mmm a / code / graph - algorithms / bridges_in_graph / bridges . cpp <nl> ppp b / code / graph - algorithms / bridges_in_graph / bridges . cpp <nl> <nl> # include < vector > <nl> # include < map > <nl> # include < algorithm > <nl> - <nl> + / / Part of Cosmos by OpenGenus Foundation <nl> using namespace std ; <nl> <nl> # define WHITE 0 <nl>
header comment added
OpenGenus/cosmos
f21913086f5f5dadb8c7c689522dc0d9378754de
2017-10-04T08:15:23Z
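
The file touched above implements bridge finding. For context, a compact sketch of the standard DFS discovery/low-link formulation on a small hard-coded graph — one common variant, not necessarily the file's exact code:

#include <algorithm>
#include <iostream>
#include <vector>

struct BridgeFinder {
  std::vector<std::vector<int>> adj;
  std::vector<int> disc, low;                 // discovery time, low-link
  std::vector<std::pair<int, int>> bridges;
  int timer = 0;

  explicit BridgeFinder(int n) : adj(n), disc(n, -1), low(n, -1) {}

  void add_edge(int u, int v) { adj[u].push_back(v); adj[v].push_back(u); }

  void dfs(int u, int parent) {
    disc[u] = low[u] = timer++;
    for (int v : adj[u]) {
      if (v == parent) continue;              // skip the edge we came in on
      if (disc[v] != -1) {
        low[u] = std::min(low[u], disc[v]);   // back edge
      } else {
        dfs(v, u);
        low[u] = std::min(low[u], low[v]);
        if (low[v] > disc[u])                 // no back edge past u: bridge
          bridges.emplace_back(u, v);
      }
    }
  }

  void run() {
    for (int u = 0; u < (int)adj.size(); ++u)
      if (disc[u] == -1) dfs(u, -1);
  }
};

int main() {
  BridgeFinder g(5);
  g.add_edge(0, 1); g.add_edge(1, 2); g.add_edge(2, 0);  // a cycle: no bridges
  g.add_edge(1, 3); g.add_edge(3, 4);                    // a chain: two bridges
  g.run();
  for (auto [u, v] : g.bridges)
    std::cout << u << " - " << v << "\n";  // expect 3 - 4 and 1 - 3
  return 0;
}
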