idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
32,400
/**
 * Helper method to generate join orders for an OUTER join tree.
 * Permutations for LEFT joins are not supported yet, so the input tree is
 * returned, unchanged, as the single candidate join order.
 *
 * @param subTree the outer-join tree
 * @return a one-element list containing subTree itself
 */
private static List<JoinNode> generateOuterJoinOrdersForTree(JoinNode subTree) {
    final List<JoinNode> result = new ArrayList<>();
    result.add(subTree);
    return result;
}
Helper method to generate join orders for an OUTER join tree. At the moment, permutations for LEFT joins are not supported yet, so the tree itself is the only order returned.
32,401
/**
 * Helper method to generate join orders for a join tree containing only FULL
 * joins.  For each branch node, the only allowed permutation is a join order
 * with the original left and right sub-orders swapped, so the result grows as
 * the cross product of the left and right sub-order lists, doubled.
 *
 * @param subTree the (sub)tree to permute; leaves are returned as-is
 * @return a non-empty list of cloned join-order trees
 */
private static List<JoinNode> generateFullJoinOrdersForTree(JoinNode subTree) {
    assert(subTree != null);
    List<JoinNode> joinOrders = new ArrayList<>();
    if (!(subTree instanceof BranchNode)) {
        // Leaf node: the only order is the node itself.
        joinOrders.add(subTree);
        return joinOrders;
    }
    BranchNode branchNode = (BranchNode) subTree;
    assert(branchNode.getLeftNode() != null);
    List<JoinNode> leftJoinOrders = generateFullJoinOrdersForTree(branchNode.getLeftNode());
    assert(!leftJoinOrders.isEmpty());
    assert(branchNode.getRightNode() != null);
    List<JoinNode> rightJoinOrders = generateFullJoinOrdersForTree(branchNode.getRightNode());
    assert(!rightJoinOrders.isEmpty());
    // Pair every left sub-order with every right sub-order, in both
    // (left, right) and (right, left) arrangements.  Children are cloned so
    // each generated order owns an independent tree.
    for (JoinNode leftNode : leftJoinOrders) {
        for (JoinNode rightNode : rightJoinOrders) {
            JoinNode resultOne = new BranchNode(branchNode.getId(), branchNode.getJoinType(),
                    (JoinNode) leftNode.clone(), (JoinNode) rightNode.clone());
            JoinNode resultTwo = new BranchNode(branchNode.getId(), branchNode.getJoinType(),
                    (JoinNode) rightNode.clone(), (JoinNode) leftNode.clone());
            // Carry the original JOIN and WHERE expressions onto both copies.
            if (branchNode.getJoinExpression() != null) {
                resultOne.setJoinExpression(branchNode.getJoinExpression().clone());
                resultTwo.setJoinExpression(branchNode.getJoinExpression().clone());
            }
            if (branchNode.getWhereExpression() != null) {
                resultOne.setWhereExpression(branchNode.getWhereExpression().clone());
                resultTwo.setWhereExpression(branchNode.getWhereExpression().clone());
            }
            joinOrders.add(resultOne);
            joinOrders.add(resultTwo);
        }
    }
    return joinOrders;
}
Helper method to generate join orders for a join tree containing only FULL joins . The only allowed permutation is a join order that has original left and right nodes swapped .
32,402
/**
 * Given a specific join order, compute all possible sub-plan-graphs for that
 * join order and add them to the deque of plans.  If this doesn't add plans,
 * it doesn't mean no more plans can be generated -- it's possible that this
 * particular join order simply had no reasonable plans.
 *
 * @param joinTree one particular join order to expand into candidate plans
 */
private void generateMorePlansForJoinTree(JoinNode joinTree) {
    assert(joinTree != null);
    // First compute the candidate access paths (scan/index choices) for the
    // nodes of this tree.
    generateAccessPaths(joinTree);
    // Then walk the nodes in join order, building sub-plans recursively.
    List<JoinNode> nodes = joinTree.generateAllNodesJoinOrder();
    generateSubPlanForJoinNodeRecursively(joinTree, 0, nodes);
}
Given a specific join order, compute all possible sub-plan-graphs for that join order and add them to the deque of plans. If this doesn't add plans, it doesn't mean no more plans can be generated; it's possible that the particular join order it got had no reasonable plans.
32,403
/**
 * Generate all possible access paths for the inner (right) child of a join.
 * The set of potential index expressions depends on whether the inner node
 * can be inlined with an NLIJ: if so, both inner and inner-outer join
 * expressions may drive index access; otherwise only inner join expressions
 * qualify.
 *
 * @param parentNode the join whose inner child's access paths are generated
 */
private void generateInnerAccessPaths(BranchNode parentNode) {
    JoinNode innerChildNode = parentNode.getRightNode();
    assert(innerChildNode != null);
    // For INNER joins, WHERE and JOIN filters are interchangeable: fold the
    // WHERE-clause lists into the JOIN-clause lists so they can be
    // considered for index access below.
    if (parentNode.getJoinType() == JoinType.INNER) {
        parentNode.m_joinInnerOuterList.addAll(parentNode.m_whereInnerOuterList);
        parentNode.m_whereInnerOuterList.clear();
        parentNode.m_joinInnerList.addAll(parentNode.m_whereInnerList);
        parentNode.m_whereInnerList.clear();
    }
    if (innerChildNode instanceof BranchNode) {
        // The inner child is itself a join: recurse, then the only access
        // path for the sub-join's result is the naive (sequential) one.
        generateOuterAccessPaths((BranchNode) innerChildNode);
        generateInnerAccessPaths((BranchNode) innerChildNode);
        innerChildNode.m_accessPaths.add(
                getRelevantNaivePath(parentNode.m_joinInnerOuterList, parentNode.m_joinInnerList));
        return;
    }
    // Leaf (table) inner child.  For FULL joins the inner join expressions
    // may not filter the inner scan; they become post-join filters instead.
    List<AbstractExpression> filterExprs = null;
    List<AbstractExpression> postExprs = null;
    if (parentNode.getJoinType() != JoinType.FULL) {
        filterExprs = parentNode.m_joinInnerList;
    }
    else {
        postExprs = parentNode.m_joinInnerList;
    }
    StmtTableScan innerTable = innerChildNode.getTableScan();
    assert(innerTable != null);
    innerChildNode.m_accessPaths.addAll(
            getRelevantAccessPathsForTable(innerTable,
                    parentNode.m_joinInnerOuterList, filterExprs, postExprs));
    // A partitioned, non-replicated inner table under a non-INNER join (when
    // the statement wasn't forced single-partition) may require a
    // send/receive pair below the join, which restricts the usable paths.
    boolean mayNeedInnerSendReceive =
            (! m_partitioning.wasSpecifiedAsSingle()) &&
            (m_partitioning.getCountOfPartitionedTables() > 0) &&
            (parentNode.getJoinType() != JoinType.INNER) &&
            ! innerTable.getIsReplicated();
    if (mayNeedInnerSendReceive && ! parentNode.m_joinInnerOuterList.isEmpty()) {
        // Keep only index paths driven by inner-outer join expressions
        // (candidates for NLIJ inlining) ...
        List<AccessPath> innerOuterAccessPaths = new ArrayList<>();
        for (AccessPath innerAccessPath : innerChildNode.m_accessPaths) {
            if ((innerAccessPath.index != null) &&
                    hasInnerOuterIndexExpression(innerChildNode.getTableAlias(),
                            innerAccessPath.indexExprs,
                            innerAccessPath.initialExpr,
                            innerAccessPath.endExprs)) {
                innerOuterAccessPaths.add(innerAccessPath);
            }
        }
        // ... and regenerate the remaining paths for a plain NLJ, where
        // inner-outer expressions can only apply after the join.
        if (parentNode.getJoinType() != JoinType.FULL) {
            filterExprs = parentNode.m_joinInnerList;
            postExprs = parentNode.m_joinInnerOuterList;
        }
        else {
            // FULL join: nothing may filter the inner scan; everything
            // becomes a post-join filter.
            filterExprs = null;
            postExprs = new ArrayList<>(parentNode.m_joinInnerList);
            postExprs.addAll(parentNode.m_joinInnerOuterList);
        }
        Collection<AccessPath> nljAccessPaths =
                getRelevantAccessPathsForTable(innerTable, null, filterExprs, postExprs);
        innerChildNode.m_accessPaths.clear();
        innerChildNode.m_accessPaths.addAll(nljAccessPaths);
        innerChildNode.m_accessPaths.addAll(innerOuterAccessPaths);
    }
    assert(innerChildNode.m_accessPaths.size() > 0);
}
Generate all possible access paths for an inner node in a join. The set of potential index expressions depends on whether the inner node can be inlined with the NLIJ or not. In the former case, both inner and inner-outer join expressions can be considered for the index access; in the latter, only inner join expressions qualify.
32,404
/**
 * Given a specific join node (with its chosen access paths), construct the
 * sub-plan that produces the correct tuples for it.
 *
 * @param joinNode the node (branch or leaf) to build a plan for
 * @return the root of the sub-plan, or null if no plan could be built
 */
private AbstractPlanNode getSelectSubPlanForJoinNode(JoinNode joinNode) {
    assert(joinNode != null);
    if (joinNode instanceof BranchNode) {
        BranchNode branchJoinNode = (BranchNode) joinNode;
        // Build the outer (left) sub-plan first; give up if it can't be built.
        AbstractPlanNode outerScanPlan = getSelectSubPlanForJoinNode(branchJoinNode.getLeftNode());
        if (outerScanPlan == null) {
            return null;
        }
        AbstractPlanNode innerScanPlan = getSelectSubPlanForJoinNode((branchJoinNode).getRightNode());
        if (innerScanPlan == null) {
            return null;
        }
        IndexSortablePlanNode answer = getSelectSubPlanForJoin(branchJoinNode, outerScanPlan, innerScanPlan);
        // For an INNER join whose outer plan is index-sorted, propagate the
        // outer scan's index/sort metadata up to the join node so ORDER BY
        // and window functions can still exploit the index order.
        if ((answer != null) && (branchJoinNode.getJoinType() == JoinType.INNER) && outerScanPlan instanceof IndexSortablePlanNode) {
            IndexUseForOrderBy indexUseForJoin = answer.indexUse();
            IndexUseForOrderBy indexUseFromScan = ((IndexSortablePlanNode) outerScanPlan).indexUse();
            indexUseForJoin.setWindowFunctionUsesIndex(indexUseFromScan.getWindowFunctionUsesIndex());
            indexUseForJoin.setWindowFunctionIsCompatibleWithOrderBy(indexUseFromScan.isWindowFunctionCompatibleWithOrderBy());
            indexUseForJoin.setFinalExpressionOrderFromIndexScan(indexUseFromScan.getFinalExpressionOrderFromIndexScan());
            indexUseForJoin.setSortOrderFromIndexScan(indexUseFromScan.getSortOrderFromIndexScan());
        }
        if (answer == null) {
            return null;
        }
        return answer.planNode();
    }
    // Leaf node: build the access (scan) plan for the table.
    AbstractPlanNode scanNode = getAccessPlanForTable(joinNode);
    // For a subquery leaf, link the already-planned subquery plan graph
    // beneath the scan node.
    if (joinNode instanceof SubqueryLeafNode) {
        StmtSubqueryScan tableScan = ((SubqueryLeafNode) joinNode).getSubqueryScan();
        CompiledPlan subQueryPlan = tableScan.getBestCostPlan();
        assert(subQueryPlan != null);
        assert(subQueryPlan.rootPlanGraph != null);
        // Detach the subquery root from any prior parent before re-linking.
        subQueryPlan.rootPlanGraph.disconnectParents();
        scanNode.addAndLinkChild(subQueryPlan.rootPlanGraph);
    }
    return scanNode;
}
Given a specific join node and access path set for inner and outer tables construct the plan that gives the right tuples .
32,405
/**
 * Partitions the given expressions by TVE count: expressions referencing
 * exactly one TupleValueExpression are returned; all others are appended to
 * otherExprs.
 *
 * @param exprs      expressions to partition
 * @param otherExprs output list receiving the multi-/zero-TVE expressions
 * @return the expressions containing exactly one TVE
 */
private static List<AbstractExpression> filterSingleTVEExpressions(List<AbstractExpression> exprs,
        List<AbstractExpression> otherExprs) {
    final List<AbstractExpression> singleTveExprs = new ArrayList<>();
    for (AbstractExpression candidate : exprs) {
        final List<TupleValueExpression> tves = ExpressionUtil.getTupleValueExpressions(candidate);
        if (tves.size() == 1) {
            singleTveExprs.add(candidate);
        }
        else {
            otherExprs.add(candidate);
        }
    }
    return singleTveExprs;
}
A method to filter out single - TVE expressions .
32,406
/**
 * Shutdown hook: notifies every Kafka consumer runner of shutdown exactly
 * once, then closes this object's resources.
 */
public void notifyShutdown() {
    // compareAndSet ensures the shutdown sequence runs only once even when
    // several threads race into this method.
    if (!m_shutdown.compareAndSet(false, true)) {
        return;
    }
    for (KafkaExternalConsumerRunner consumer : m_consumers) {
        consumer.shutdown();
    }
    close();
}
shutdown hook to notify kafka consumer threads of shutdown
32,407
/**
 * Optionally transforms a DDL statement so that PostgreSQL results will
 * match VoltDB results, then delegates execution to the base class.
 *
 * @param ddl          the original DDL text
 * @param transformDdl whether to apply the PostgreSQL transformation
 */
protected void runDDL(String ddl, boolean transformDdl) {
    final String effectiveDdl;
    if (transformDdl) {
        effectiveDdl = transformDDL(ddl);
    }
    else {
        effectiveDdl = ddl;
    }
    printTransformedSql(ddl, effectiveDdl);
    super.runDDL(effectiveDdl);
}
Optionally modifies DDL statements in such a way that PostgreSQL results will match VoltDB results ; and then passes the remaining work to the base class version .
32,408
/**
 * Returns the VoltDB column type name corresponding to the given PostgreSQL
 * column type name; falls back to the upper-cased input when no mapping is
 * registered.
 *
 * @param columnTypeName the PostgreSQL type name
 * @return the equivalent VoltDB type name
 */
protected String getVoltColumnTypeName(String columnTypeName) {
    final String voltName = m_PostgreSQLTypeNames.get(columnTypeName);
    if (voltName != null) {
        return voltName;
    }
    return columnTypeName.toUpperCase();
}
Returns the column type name in VoltDB corresponding to the specified column type name in PostgreSQL .
32,409
/**
 * Returns the number of occurrences of the specified character in the
 * specified String, ignoring any occurrences inside single-quoted text.
 *
 * @param str the string to scan
 * @param ch  the character to count
 * @return the count of matches outside single quotes
 */
static private int numOccurencesOfCharIn(String str, char ch) {
    boolean inMiddleOfQuote = false;
    int num = 0, previousIndex = 0;
    for (int index = str.indexOf(ch); index >= 0; index = str.indexOf(ch, index + 1)) {
        // An odd number of single quotes between the previous match and this
        // one means we crossed into (or out of) a quoted literal.
        if (hasOddNumberOfSingleQuotes(str.substring(previousIndex, index))) {
            inMiddleOfQuote = !inMiddleOfQuote;
        }
        // Only count matches falling outside single quotes.
        if (!inMiddleOfQuote) {
            num++;
        }
        previousIndex = index;
    }
    return num;
}
Returns the number of occurrences of the specified character in the specified String but ignoring those contained in single quotes .
32,410
/**
 * Returns the index of the Nth occurrence of the specified character in the
 * specified String, ignoring occurrences contained in single quotes;
 * returns -1 if there are fewer than n such occurrences.
 *
 * @param str the string to scan
 * @param ch  the character to find
 * @param n   which (1-based) unquoted occurrence to locate
 * @return the index of the Nth unquoted occurrence, or -1
 */
static private int indexOfNthOccurrenceOfCharIn(String str, char ch, int n) {
    boolean inMiddleOfQuote = false;
    int index = -1, previousIndex = 0;
    for (int i = 0; i < n; i++) {
        // Advance to the next match that is not inside single quotes.
        do {
            index = str.indexOf(ch, index + 1);
            if (index < 0) {
                return -1;
            }
            // An odd number of single quotes since the previous match means
            // we crossed a quote boundary.
            if (hasOddNumberOfSingleQuotes(str.substring(previousIndex, index))) {
                inMiddleOfQuote = !inMiddleOfQuote;
            }
            previousIndex = index;
        } while (inMiddleOfQuote);
    }
    return index;
}
Returns the index of the Nth occurrence of the specified character in the specified String, ignoring those contained in single quotes.
32,411
/**
 * Optionally transforms a DML/query statement so that PostgreSQL results
 * will match VoltDB results, then delegates execution to the base class.
 *
 * @param dml          the original statement text
 * @param transformDml whether to apply the PostgreSQL transformation
 * @return the result table from the base class execution
 */
protected VoltTable runDML(String dml, boolean transformDml) {
    final String effectiveDml;
    if (transformDml) {
        effectiveDml = transformDML(dml);
    }
    else {
        effectiveDml = dml;
    }
    printTransformedSql(dml, effectiveDml);
    return super.runDML(effectiveDml);
}
Optionally modifies queries in such a way that PostgreSQL results will match VoltDB results ; and then passes the remaining work to the base class version .
32,412
/**
 * Returns a distinct int code for each primitive type; all reference
 * (non-primitive) types share the single CLASS_CODE_OBJECT code.
 *
 * @param cla the class to classify
 * @return the class code
 */
static int getClassCode(Class cla) {
    // Every reference type maps to one shared code.
    return cla.isPrimitive() ? classCodeMap.get(cla, -1)
                             : ArrayUtil.CLASS_CODE_OBJECT;
}
Returns a distinct int code for each primitive type and for all Object types .
32,413
/**
 * Clears (fills with 0 / false / null) the elements of the given array in
 * positions [from, to), dispatching on the array's class code.
 *
 * @param type the ArrayUtil.CLASS_CODE_* code of the array's element type
 * @param data the array object
 * @param from first index to clear (inclusive)
 * @param to   last index to clear (exclusive)
 */
public static void clearArray(int type, Object data, int from, int to) {
    switch (type) {
        case ArrayUtil.CLASS_CODE_BYTE : {
            byte[] array = (byte[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_CHAR : {
            // BUG FIX: this case previously cast to byte[], which throws
            // ClassCastException when handed an actual char[].
            char[] array = (char[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_SHORT : {
            short[] array = (short[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_INT : {
            int[] array = (int[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_LONG : {
            long[] array = (long[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_FLOAT : {
            float[] array = (float[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_DOUBLE : {
            double[] array = (double[]) data;
            while (--to >= from) {
                array[to] = 0;
            }
            return;
        }
        case ArrayUtil.CLASS_CODE_BOOLEAN : {
            boolean[] array = (boolean[]) data;
            while (--to >= from) {
                array[to] = false;
            }
            return;
        }
        default : {
            // Any reference-typed array is cleared to null.
            Object[] array = (Object[]) data;
            while (--to >= from) {
                array[to] = null;
            }
            return;
        }
    }
}
Clears an area of the given array of the given type .
32,414
/**
 * Moves the contents of an array to allow both addition and removal of
 * elements.  A positive count opens a gap of count slots at index (shifting
 * the tail right); a negative count removes -count elements at index
 * (shifting the tail left) and clears the vacated slots at the end.
 * Arguments must already be in range.
 *
 * @param type         the ArrayUtil.CLASS_CODE_* element-type code
 * @param array        the array object
 * @param usedElements how many leading elements are in use
 * @param index        position of the insertion/removal
 * @param count        number of slots to add (positive) or remove (negative)
 */
public static void adjustArray(int type, Object array, int usedElements, int index, int count) {
    if (index >= usedElements) {
        // Nothing after the adjustment point; no shifting needed.
        return;
    }
    int newCount = usedElements + count;
    int source;
    int target;
    int size;
    if (count >= 0) {
        // Insertion: shift the tail starting at index right by count slots.
        source = index;
        target = index + count;
        size = usedElements - index;
    }
    else {
        // Removal: shift the tail left over the removed elements
        // (count is negative, so index - count == index + |count|).
        source = index - count;
        target = index;
        size = usedElements - index + count;
    }
    if (size > 0) {
        System.arraycopy(array, source, array, target, size);
    }
    if (count < 0) {
        // Clear the slots vacated at the end by the removal.
        clearArray(type, array, newCount, usedElements);
    }
}
Moves the contents of an array to allow both addition and removal of elements . Used arguments must be in range .
32,415
/**
 * Basic in-place sort for small arrays of int (bubble sort: repeat passes
 * until a full pass makes no swap).
 *
 * @param array the array to sort ascending
 */
public static void sortArray(int[] array) {
    boolean dirty = true;
    while (dirty) {
        dirty = false;
        for (int j = 1; j < array.length; j++) {
            if (array[j - 1] > array[j]) {
                final int held = array[j];
                array[j] = array[j - 1];
                array[j - 1] = held;
                dirty = true;
            }
        }
    }
}
Basic sort for small arrays of int .
32,416
/**
 * Basic linear find for small arrays of Object: matches by identity or,
 * when object is non-null, by equals().
 *
 * @param array  the array to search
 * @param object the value to look for (may be null)
 * @return the first matching index, or -1
 */
public static int find(Object[] array, Object object) {
    for (int i = 0; i < array.length; i++) {
        final Object current = array[i];
        if (current == object || (object != null && object.equals(current))) {
            return i;
        }
    }
    return -1;
}
Basic find for small arrays of Object .
32,417
/**
 * Finds the first element of the array that differs from the given value.
 *
 * @param array the array to scan
 * @param value the value to skip over
 * @return index of the first non-matching element, or -1 if all match
 */
public static int findNot(int[] array, int value) {
    int i = 0;
    while (i < array.length) {
        if (array[i] != value) {
            return i;
        }
        i++;
    }
    return -1;
}
Finds the first element of the array that is not equal to the given value .
32,418
/**
 * Returns true if arra and arrb contain the same set of integers, not
 * necessarily in the same order; this requires equal lengths.
 *
 * @param arra first array
 * @param arrb second array
 * @return true when both arrays hold the same multiset of values
 */
public static boolean areEqualSets(int[] arra, int[] arrb) {
    if (arra.length != arrb.length) {
        return false;
    }
    return ArrayUtil.haveEqualSets(arra, arrb, arra.length);
}
Returns true if arra and arrb contain the same set of integers not necessarily in the same order . This implies the arrays are of the same length .
32,419
/**
 * Returns true if the first count elements of arra and arrb are identical
 * subarrays of integers; false if either array is shorter than count.
 *
 * @param arra  first array
 * @param arrb  second array
 * @param count how many leading elements to compare
 * @return true when the leading count elements match pairwise
 */
public static boolean haveEqualArrays(int[] arra, int[] arrb, int count) {
    if (arra.length < count || arrb.length < count) {
        return false;
    }
    for (int i = count - 1; i >= 0; i--) {
        if (arra[i] != arrb[i]) {
            return false;
        }
    }
    return true;
}
Returns true if the first count elements of arra and arrb are identical subarrays of integers
32,420
/**
 * Returns true if the first count elements of arra and arrb are identical
 * subarrays of Objects (matching by identity or equals, null-safe); false
 * if either array is shorter than count.
 *
 * @param arra  first array
 * @param arrb  second array
 * @param count how many leading elements to compare
 * @return true when the leading count elements match pairwise
 */
public static boolean haveEqualArrays(Object[] arra, Object[] arrb, int count) {
    if (arra.length < count || arrb.length < count) {
        return false;
    }
    for (int i = 0; i < count; i++) {
        final Object a = arra[i];
        final Object b = arrb[i];
        if (a == b) {
            continue;
        }
        if (a == null || !a.equals(b)) {
            return false;
        }
    }
    return true;
}
Returns true if the first count elements of arra and arrb are identical subarrays of Objects
32,421
/**
 * Returns how many elements of arra, beginning at position start, are
 * sequentially equal to the leading elements of arrb.  Stops at the first
 * mismatch or when either array runs out.
 *
 * @param arra  array scanned from position start
 * @param start starting offset in arra
 * @param arrb  array matched from position 0
 * @return the length of the matching prefix
 */
public static int countSameElements(byte[] arra, int start, byte[] arrb) {
    // We can compare at most the shorter of the two remaining spans.
    final int span = Math.min(arra.length - start, arrb.length);
    int matched = 0;
    while (matched < span && arra[start + matched] == arrb[matched]) {
        matched++;
    }
    return matched;
}
Returns the count of elements in arra from position start that are sequentially equal to the elements of arrb .
32,422
/**
 * Returns the index of the first occurrence of arrb within arra, searching
 * positions [start, limit); -1 if not found.
 *
 * @param arra  the array to search
 * @param start first candidate start position
 * @param limit exclusive end of the searched region
 * @param arrb  the byte sequence to find (must be non-empty)
 * @return index of the first full match, or -1
 */
public static int find(byte[] arra, int start, int limit, byte[] arrb) {
    // Last position where a full match could still begin.
    final int lastStart = limit - arrb.length + 1;
    final int firstByte = arrb[0];
    for (int k = start; k < lastStart; k++) {
        if (arra[k] != firstByte) {
            continue;
        }
        // Single-byte needle already matched; otherwise verify the rest.
        if (arrb.length == 1 || containsAt(arra, k, arrb)) {
            return k;
        }
    }
    return -1;
}
Returns the index of the first occurrence of arrb in arra, or -1 if not found.
32,423
/**
 * Returns the index of the first element in arra, searching positions
 * [start, limit), that equals either b or c; -1 if not found.
 *
 * @param arra  the array to search
 * @param start first index to examine (inclusive)
 * @param limit exclusive end of the searched region
 * @param b     first value to match
 * @param c     second value to match
 * @return index of the first match at or after start, or -1
 */
public static int find(byte[] arra, int start, int limit, int b, int c) {
    // BUG FIX: the search previously began at index 0, silently ignoring
    // the 'start' parameter.
    int k = start;
    for (; k < limit; k++) {
        if (arra[k] == b || arra[k] == c) {
            return k;
        }
    }
    return -1;
}
Returns the index of b or c in arra . Or - 1 if not found .
32,424
/**
 * Sets arrb[i] to true for every value i appearing in arra; values outside
 * arrb's bounds are ignored.
 *
 * @param arra array of indexes
 * @param arrb boolean array to mark
 */
public static void intIndexesToBooleanArray(int[] arra, boolean[] arrb) {
    for (int value : arra) {
        if (value < arrb.length) {
            arrb[value] = true;
        }
    }
}
Sets elements of arrb true if their indexes appear in arra.
32,425
/**
 * Returns true if, for each true element in arrb, the corresponding element
 * in arra is also true (iterates over arra's length).
 *
 * @param arra the candidate superset
 * @param arrb the required set
 * @return true when every true position of arrb is true in arra
 */
public static boolean containsAllTrueElements(boolean[] arra, boolean[] arrb) {
    for (int i = 0; i < arra.length; i++) {
        // A position required by arrb but missing from arra fails the test.
        if (arrb[i] && !arra[i]) {
            return false;
        }
    }
    return true;
}
Return true if for each true element in arrb the corresponding element in arra is true
32,426
/**
 * Returns the number of true elements in the array.
 *
 * @param arra the array to count
 * @return how many positions hold true
 */
public static int countTrueElements(boolean[] arra) {
    int total = 0;
    for (boolean flag : arra) {
        if (flag) {
            total++;
        }
    }
    return total;
}
Return count of true elements in array
32,427
/**
 * Determines whether the row array holds a null in any of the positions
 * listed in columnMap.
 *
 * @param array     the row values
 * @param columnMap indexes into array to inspect
 * @return true when any mapped position is null
 */
public static boolean hasNull(Object[] array, int[] columnMap) {
    for (int position : columnMap) {
        if (array[position] == null) {
            return true;
        }
    }
    return false;
}
Determines if the array has a null column for any of the positions given in the rowColMap array .
32,428
/**
 * Returns true if arra, starting at position start, contains every element
 * of arrb in sequential order.
 *
 * @param arra  the array searched
 * @param start offset in arra where the match must begin
 * @param arrb  the byte sequence to verify
 * @return true when the whole of arrb matches at start
 */
public static boolean containsAt(byte[] arra, int start, byte[] arrb) {
    final int matched = countSameElements(arra, start, arrb);
    return matched == arrb.length;
}
Returns true if arra from position start contains all elements of arrb in sequential order .
32,429
/**
 * Returns the count of consecutive elements of arra, beginning at position
 * start, that are members of arrb; stops at the first element not in arrb.
 *
 * @param arra  array scanned from start
 * @param start starting offset in arra
 * @param arrb  the membership set
 * @return length of the run of arrb members
 */
public static int countStartElementsAt(byte[] arra, int start, byte[] arrb) {
    int count = 0;
    for (int i = start; i < arra.length; i++) {
        boolean member = false;
        for (int j = 0; j < arrb.length; j++) {
            if (arra[i] == arrb[j]) {
                member = true;
                break;
            }
        }
        if (!member) {
            break;
        }
        count++;
    }
    return count;
}
Returns the count of elements in arra from position start that are among the elements of arrb . Stops at any element not in arrb .
32,430
/**
 * Returns a new array holding the count elements of source that begin at
 * position start.
 *
 * @param source the array to slice
 * @param start  index of the first copied element
 * @param count  number of elements to copy
 * @return a fresh array of length count
 */
public static int[] arraySlice(int[] source, int start, int count) {
    final int[] result = new int[count];
    System.arraycopy(source, start, result, 0, count);
    return result;
}
Returns a new array containing count elements of source, starting at position start.
32,431
/**
 * Fills every slot of the array with the given value.
 *
 * @param array the array to fill
 * @param value the value written into each slot
 */
public static void fillArray(Object[] array, Object value) {
    for (int i = 0; i < array.length; i++) {
        array[i] = value;
    }
}
Fills the array with a value .
32,432
/**
 * Returns a shallow duplicate of an array of any component type, built via
 * reflection so one method serves primitive and reference arrays alike.
 *
 * @param source the array to copy
 * @return a new array of the same component type and contents
 */
public static Object duplicateArray(Object source) {
    final int length = Array.getLength(source);
    final Object copy = Array.newInstance(source.getClass().getComponentType(), length);
    System.arraycopy(source, 0, copy, 0, length);
    return copy;
}
Returns a duplicate of an array.
32,433
/**
 * Returns the given array unchanged when newsize equals its current length;
 * otherwise returns a new array of length newsize holding as many elements
 * of the original as fit.
 *
 * @param source  the array to (possibly) resize
 * @param newsize the desired length
 * @return source itself, or a resized copy
 */
public static Object resizeArrayIfDifferent(Object source, int newsize) {
    final int oldsize = Array.getLength(source);
    if (oldsize == newsize) {
        return source;
    }
    final Object resized = Array.newInstance(source.getClass().getComponentType(), newsize);
    // Copy whichever span fits: all of the old data when growing, a prefix
    // when shrinking.
    final int copyCount = (oldsize < newsize) ? oldsize : newsize;
    System.arraycopy(source, 0, resized, 0, copyCount);
    return resized;
}
Returns the given array if newsize is the same as existing . Returns a new array of given size containing as many elements of the original array as it can hold .
32,434
/**
 * Copies elements of source to dest around a single adjusted column.
 * If adjust is -1, the element at colindex is dropped; if +1, the slot at
 * colindex is filled with addition and the tail shifts right; if 0,
 * addition overwrites the element at colindex.  A negative colindex means
 * a plain full copy with no adjustment.
 *
 * @param source   source array (accessed reflectively)
 * @param dest     destination array, presized by the caller
 * @param addition value placed at colindex when adjust is 0 or +1
 * @param colindex the adjusted position, or negative for none
 * @param adjust   -1 remove, 0 replace, +1 insert
 */
public static void copyAdjustArray(Object source, Object dest, Object addition, int colindex, int adjust) {
    int length = Array.getLength(source);
    if (colindex < 0) {
        // No column affected: straight full copy.
        System.arraycopy(source, 0, dest, 0, length);
        return;
    }
    // Elements before colindex always copy straight across.
    System.arraycopy(source, 0, dest, 0, colindex);
    if (adjust == 0) {
        // Replace: overwrite colindex with addition, copy the tail in place.
        int endcount = length - colindex - 1;
        Array.set(dest, colindex, addition);
        if (endcount > 0) {
            System.arraycopy(source, colindex + 1, dest, colindex + 1, endcount);
        }
    }
    else if (adjust < 0) {
        // Remove: skip the source element at colindex, shifting the tail left.
        int endcount = length - colindex - 1;
        if (endcount > 0) {
            System.arraycopy(source, colindex + 1, dest, colindex, endcount);
        }
    }
    else {
        // Insert: place addition at colindex, shifting the tail right.
        int endcount = length - colindex;
        Array.set(dest, colindex, addition);
        if (endcount > 0) {
            System.arraycopy(source, colindex, dest, colindex + 1, endcount);
        }
    }
}
Copies elements of source to dest . If adjust is - 1 the element at colindex is not copied . If adjust is + 1 that element is filled with the Object addition . All the rest of the elements in source are shifted left or right accordingly when they are copied . If adjust is 0 the addition is copied over the element at colindex .
32,435
/**
 * Given a column and an array of columns, returns a new array with the
 * single column prepended onto the others.  Used so one constructor can
 * call another without breaking Java's rule that chained constructor calls
 * must come first.
 *
 * @param firstColumn the column placed at index 0
 * @param columns     the columns that follow it
 * @return a new array of length columns.length + 1
 */
private static ColumnInfo[] prependColumn(ColumnInfo firstColumn, ColumnInfo[] columns) {
    ColumnInfo[] allColumns = new ColumnInfo[columns.length + 1];
    allColumns[0] = firstColumn;
    // Bulk copy instead of an element-by-element loop.
    System.arraycopy(columns, 0, allColumns, 1, columns.length);
    return allColumns;
}
Given a column and an array of columns return a new array of columns with the single guy prepended onto the others . This function is used in the constructor below so that one constructor can call another without breaking Java rules about chained constructors being the first thing called .
32,436
/**
 * Returns the name of the column with the specified index.
 *
 * @param index zero-based column index
 * @return the column's name
 * @throws IllegalArgumentException if index is out of range
 */
public final String getColumnName(int index) {
    assert(verifyTableInvariants());
    if ((index < 0) || (index >= m_colCount)) {
        throw new IllegalArgumentException("Not a valid column index.");
    }
    // Column names are stored back-to-back as length-prefixed strings,
    // starting right after the column-type bytes.
    int pos = POS_COL_TYPES + m_colCount;
    String name = null;
    // Walk past the first 'index' names: each entry is a 4-byte length
    // followed by that many bytes.
    for (int i = 0; i < index; i++) {
        pos += m_buffer.getInt(pos) + 4;
    }
    name = readString(pos, METADATA_ENCODING);
    assert(name != null);
    assert(verifyTableInvariants());
    return name;
}
Return the name of the column with the specified index .
32,437
/**
 * Appends a new row to the table using the supplied column values.
 * On any failure the buffer position is rolled back so the table is left
 * unchanged; on overflow the buffer is grown and the append retried.
 *
 * @param values one value per column, in column order
 * @throws IllegalStateException    if the table is read-only or has no columns
 * @throws IllegalArgumentException if the argument count doesn't match the
 *                                  column count (or a value is rejected and
 *                                  the buffer can't be the cause)
 */
public final void addRow(Object... values) {
    assert(verifyTableInvariants());
    if (m_readOnly) {
        throw new IllegalStateException("Table is read-only. Make a copy before changing.");
    }
    if (m_colCount == 0) {
        throw new IllegalStateException("Table has no columns defined");
    }
    if (values.length != m_colCount) {
        throw new IllegalArgumentException(values.length + " arguments but table has " + m_colCount + " columns");
    }
    // Remember the row's start so we can roll back on failure.
    final int pos = m_buffer.position();
    try {
        m_buffer.limit(m_buffer.capacity());
        // Skip 4 bytes for the row-size prefix; it is patched in below.
        m_buffer.position(pos + 4);
        int typePos = POS_COL_TYPES;
        for (int col = 0; col < m_colCount; col++) {
            Object value = values[col];
            // Column types are stored one byte each, starting at POS_COL_TYPES.
            VoltType columnType = VoltType.get(m_buffer.get(typePos + col));
            addColumnValue(value, columnType, col);
        }
        final int rowsize = m_buffer.position() - pos - 4;
        assert(rowsize >= 0);
        if (rowsize > VoltTableRow.MAX_TUPLE_LENGTH) {
            throw new VoltOverflowException(
                    "Table row total length larger than allowed max " + VoltTableRow.MAX_TUPLE_LENGTH_STR);
        }
        // Patch the length prefix and bump the row count (stored at m_rowStart).
        m_buffer.putInt(pos, rowsize);
        m_rowCount++;
        m_buffer.putInt(m_rowStart, m_rowCount);
    }
    catch (VoltTypeException vte) {
        // Roll back the partial row, then rethrow.
        m_buffer.position(pos);
        throw vte;
    }
    catch (BufferOverflowException e) {
        // Ran out of room mid-row: roll back, grow the buffer, retry.
        m_buffer.position(pos);
        expandBuffer();
        addRow(values);
    }
    catch (VoltOverflowException e) {
        m_buffer.position(pos);
        throw e;
    }
    catch (IllegalArgumentException e) {
        m_buffer.position(pos);
        // A nearly-full buffer can masquerade as a bad argument; retry once
        // with more room, otherwise propagate.
        if (m_buffer.limit() - m_buffer.position() < 32) {
            expandBuffer();
            addRow(values);
        }
        else {
            throw e;
        }
    }
    finally {
        m_buffer.limit(m_buffer.position());
    }
    assert(verifyTableInvariants());
}
Append a new row to the table using the supplied column values .
32,438
/**
 * Builds a short printable string for a varbinary: a CRC of the contents
 * plus a hex dump, truncated with "..." when longer than 13 hex chars.
 *
 * @param bin the varbinary bytes
 * @return a compact human-readable summary
 */
public static String varbinaryToPrintableString(byte[] bin) {
    final PureJavaCrc32 crc = new PureJavaCrc32();
    crc.update(bin);
    final StringBuilder out = new StringBuilder("bin[crc:");
    out.append(crc.getValue());
    out.append(",value:0x");
    final String hex = Encoder.hexEncode(bin);
    if (hex.length() > 13) {
        out.append(hex, 0, 10).append("...");
    }
    else {
        out.append(hex);
    }
    out.append("]");
    return out.toString();
}
Make a printable short string for a varbinary . String includes a CRC and the contents of the varbinary in hex . Contents longer than 13 chars are truncated and elipsized . Yes elipsized is totally a word .
32,439
/**
 * Returns a JSON representation of this table: status code, a schema array
 * of {name, type} objects, and a data array of row arrays.
 *
 * @return the table serialized as a JSON string
 */
public String toJSONString() {
    JSONStringer js = new JSONStringer();
    try {
        js.object();
        js.keySymbolValuePair(JSON_STATUS_KEY, getStatusCode());
        // Schema: one {name, type} object per column.
        js.key(JSON_SCHEMA_KEY).array();
        for (int i = 0; i < getColumnCount(); i++) {
            js.object();
            js.keySymbolValuePair(JSON_NAME_KEY, getColumnName(i));
            js.keySymbolValuePair(JSON_TYPE_KEY, getColumnType(i).getValue());
            js.endObject();
        }
        js.endArray();
        // Data: each row is itself a JSON array of column values.
        js.key(JSON_DATA_KEY).array();
        // Iterate over a clone so this table's own row cursor is untouched.
        VoltTableRow row = cloneRow();
        row.resetRowPosition();
        while (row.advanceRow()) {
            js.array();
            for (int i = 0; i < getColumnCount(); i++) {
                row.putJSONRep(i, js);
            }
            js.endArray();
        }
        js.endArray();
        js.endObject();
    }
    catch (JSONException e) {
        e.printStackTrace();
        throw new RuntimeException("Failed to serialized a table to JSON.", e);
    }
    return js.toString();
}
Get a JSON representation of this table .
32,440
/**
 * Constructs a table from a JSON string.  Only parses VoltDB's own
 * VoltTable JSON format.
 *
 * @param json the JSON text to parse
 * @return the reconstructed table
 * @throws JSONException if the text is not valid JSON
 * @throws IOException   if deserialization fails
 */
public static VoltTable fromJSONString(String json) throws JSONException, IOException {
    return fromJSONObject(new JSONObject(json));
}
Construct a table from a JSON string . Only parses VoltDB VoltTable JSON format .
32,441
/**
 * Non-public method to duplicate a table.  The copy shares the underlying
 * byte buffer with the original (via duplicate()) but gets its own cursor
 * state, and is marked read-only so the shared bytes can't be mutated.
 * It's possible this might be useful to end-users of VoltDB, but naming and
 * semantics should be settled before making it public.
 *
 * @return a read-only copy sharing this table's backing bytes
 */
VoltTable semiDeepCopy() {
    assert(verifyTableInvariants());
    final VoltTable cloned = new VoltTable(m_extraMetadata);
    cloned.m_colCount = m_colCount;
    cloned.m_rowCount = m_rowCount;
    cloned.m_rowStart = m_rowStart;
    // duplicate(): independent position/limit, same underlying bytes.
    cloned.m_buffer = m_buffer.duplicate();
    cloned.m_activeRowIndex = m_activeRowIndex;
    cloned.m_hasCalculatedOffsets = m_hasCalculatedOffsets;
    cloned.m_memoizedBufferOffset = m_memoizedBufferOffset;
    cloned.m_memoizedRowOffset = m_memoizedRowOffset;
    cloned.m_offsets = m_offsets == null ? null : m_offsets.clone();
    cloned.m_position = m_position;
    cloned.m_schemaString = m_schemaString == null ? null : m_schemaString.clone();
    cloned.m_wasNull = m_wasNull;
    // Shared storage: the copy must never be written through.
    cloned.m_readOnly = true;
    assert(verifyTableInvariants());
    assert(cloned.verifyTableInvariants());
    return cloned;
}
Non-public method to duplicate a table. It's possible this might be useful to end-users of VoltDB, but we should talk about naming and semantics first; don't just make this public.
32,442
/**
 * Returns the schema of the table as an array of ColumnInfo, suitable for
 * feeding into another table's constructor.
 *
 * @return one ColumnInfo per column, in column order
 */
public ColumnInfo[] getTableSchema() {
    final ColumnInfo[] schema = new ColumnInfo[m_colCount];
    for (int i = 0; i < m_colCount; i++) {
        schema[i] = new ColumnInfo(getColumnName(i), getColumnType(i));
    }
    return schema;
}
Get the schema of the table . Can be fed into another table s constructor .
32,443
/**
 * Validates processor-specific export configuration by loading,
 * instantiating, and configuring the configured export client class.
 * Any failure is surfaced to the caller as a RuntimeException.
 *
 * @param properties the export configuration to check
 */
public void checkProcessorConfig(Properties properties) {
    String exportClientClass = properties.getProperty(EXPORT_TO_TYPE);
    Preconditions.checkNotNull(exportClientClass,
            "export to type is undefined or custom export plugin class missing.");
    try {
        final Class<?> clientClass = Class.forName(exportClientClass);
        // Class.newInstance() is deprecated (it propagates checked exceptions
        // from the constructor undeclared); use the declared no-arg
        // constructor explicitly instead.
        ExportClientBase client = (ExportClientBase) clientClass.getDeclaredConstructor().newInstance();
        client.configure(properties);
    } catch (Throwable t) {
        // This is a config-check boundary: wrap and rethrow everything.
        throw new RuntimeException(t);
    }
}
Pass processor specific processor configuration properties for checking
32,444
/**
 * If the row is the last committed row, returns its SpHandle; otherwise
 * returns 0.
 *
 * @param row            the export row (values[0] = SpHandle, values[2] = seqNo)
 * @param committedSeqNo the committed sequence number, or NULL_COMMITTED_SEQNO
 * @return the SpHandle when the row's seqNo matches committedSeqNo, else 0
 */
private long extractCommittedSpHandle(ExportRow row, long committedSeqNo) {
    if (committedSeqNo == ExportDataSource.NULL_COMMITTED_SEQNO) {
        return 0;
    }
    final long rowSeqNo = (long) row.values[2];
    if (rowSeqNo != committedSeqNo) {
        return 0;
    }
    return (long) row.values[0];
}
If the row is the last committed row return the SpHandle otherwise return 0
32,445
/**
 * Emits compiler warnings for materialized views whose maintenance may be
 * slow: single-table views whose min()/max() fallback queries lack index
 * support, and joined-table views whose refresh query lacks index support
 * for some join.
 *
 * @param db         the catalog database to inspect
 * @param matViewMap unused here; kept for the caller's interface
 * @throws VoltCompilerException if plan extraction fails
 */
public void processMaterializedViewWarnings(Database db, HashMap<Table, String> matViewMap) throws VoltCompilerException {
    for (Table table : db.getTables()) {
        // Single-table views: at most one warning per view, triggered by the
        // first fallback query that would need a sequential scan.
        for (MaterializedViewInfo mvInfo : table.getViews()) {
            for (Statement stmt : mvInfo.getFallbackquerystmts()) {
                if (needsWarningForSingleTableView(getPlanNodeTreeFromCatalogStatement(db, stmt))) {
                    m_compiler.addWarn(
                            "No index found to support UPDATE and DELETE on some of the min() / max() columns " +
                            "in the materialized view " +
                            mvInfo.getTypeName() +
                            ", and a sequential scan might be issued when current min / max value is updated / deleted.");
                    break;
                }
            }
        }
        // Joined-table views: check the view's create (refresh) query.
        MaterializedViewHandlerInfo mvHandlerInfo = table.getMvhandlerinfo().get("mvHandlerInfo");
        if (mvHandlerInfo != null) {
            Statement createQueryStatement = mvHandlerInfo.getCreatequery().get("createQuery");
            if (needsWarningForJoinQueryView(getPlanNodeTreeFromCatalogStatement(db, createQueryStatement))) {
                m_compiler.addWarn(
                        "No index found to support some of the join operations required to refresh the materialized view " +
                        table.getTypeName() +
                        ". The refreshing may be slow.");
            }
        }
    }
}
Process materialized view warnings .
32,446
/**
 * If the argument table is a single-table materialized view, returns the
 * attendant MaterializedViewInfo object; otherwise returns null.
 *
 * @param tbl the candidate view table
 * @return the view info, or null when tbl has no materializer
 */
public static MaterializedViewInfo getMaterializedViewInfo(Table tbl) {
    final Table source = tbl.getMaterializer();
    if (source == null) {
        return null;
    }
    return source.getViews().get(tbl.getTypeName());
}
If the argument table is a single - table materialized view then return the attendant MaterializedViewInfo object . Otherwise return null .
32,447
/**
 * Gets the site-local fragment id for the plan identified by its 20-byte
 * SHA-1 hash.  The plan is expected to be present in the cache.
 *
 * @param planHash the plan's SHA-1 hash
 * @return the cached fragment id
 */
public static long getFragmentIdForPlanHash(byte[] planHash) {
    final Sha1Wrapper key = new Sha1Wrapper(planHash);
    final FragInfo frag;
    // The cache maps are guarded by the FragInfo class lock.
    synchronized (FragInfo.class) {
        frag = m_plansByHash.get(key);
    }
    assert(frag != null);
    return frag.fragId;
}
Get the site - local fragment id for a given plan identified by 20 - byte sha - 1 hash
32,448
/**
 * Gets the statement text for the fragment identified by its 20-byte SHA-1
 * hash.  The fragment and its text are expected to be present.
 *
 * @param planHash the plan's SHA-1 hash
 * @return the cached statement text
 */
public static String getStmtTextForPlanHash(byte[] planHash) {
    final Sha1Wrapper key = new Sha1Wrapper(planHash);
    final FragInfo frag;
    // The cache maps are guarded by the FragInfo class lock.
    synchronized (FragInfo.class) {
        frag = m_plansByHash.get(key);
    }
    assert(frag != null);
    assert(frag.stmtText != null);
    return frag.stmtText;
}
Get the statement text for the fragment identified by its hash
32,449
/**
 * Gets the site-local fragment id for a plan identified by its 20-byte
 * SHA-1 hash.  If the plan isn't known yet, it is loaded into the cache;
 * otherwise its reference count is bumped.  Also backfills the statement
 * text if the cached entry is missing it.
 *
 * @param planHash the plan's SHA-1 hash
 * @param plan     the serialized plan (used only on first sighting)
 * @param stmtText the statement text for the plan
 * @return the fragment id assigned to this plan
 */
public static long loadOrAddRefPlanFragment(byte[] planHash, byte[] plan, String stmtText) {
    Sha1Wrapper key = new Sha1Wrapper(planHash);
    synchronized (FragInfo.class) {
        FragInfo frag = m_plansByHash.get(key);
        if (frag == null) {
            // First sighting: assign the next fragment id and index the
            // entry by both hash and id.
            frag = new FragInfo(key, plan, m_nextFragId++, stmtText);
            m_plansByHash.put(frag.hash, frag);
            m_plansById.put(frag.fragId, frag);
            // Keep the cache bounded.
            if (m_plansById.size() > ExecutionEngine.EE_PLAN_CACHE_SIZE) {
                evictLRUfragment();
            }
        }
        // Backfill text if an earlier caller didn't supply it.
        if (frag.stmtText == null) {
            frag.stmtText = stmtText;
        }
        frag.refCount++;
        return frag.fragId;
    }
}
Get the site - local fragment id for a given plan identified by 20 - byte sha - 1 hash If the plan isn t known to this SPC load it up . Otherwise addref it .
32,450
/**
 * Gets the full JSON plan associated with a given site-local fragment id.
 * Called by the EE; the fragment is expected to be cached.
 *
 * @param fragmentId the positive site-local fragment id
 * @return the serialized plan bytes
 */
public static byte[] planForFragmentId(long fragmentId) {
    assert(fragmentId > 0);
    final FragInfo frag;
    // The cache maps are guarded by the FragInfo class lock.
    synchronized (FragInfo.class) {
        frag = m_plansById.get(fragmentId);
    }
    assert(frag != null);
    return frag.plan;
}
Get the full JSON plan associated with a given site - local fragment id . Called by the EE
32,451
/**
 * Test whether this expression matches expr structurally (same type, same
 * attributes, same child shape), collecting any parameter bindings discovered
 * along the way. Returns the combined bindings from the left child, right
 * child, and each argument (possibly an empty list for an exact match), or
 * null as soon as any structural mismatch is found. Recurses through m_left,
 * m_right and m_args on both trees in parallel.
 */
public List < AbstractExpression > bindingToIndexedExpression ( AbstractExpression expr ) { if ( m_type != expr . m_type ) { return null ; } if ( ! hasEqualAttributes ( expr ) ) { return null ; } if ( ( expr . m_left == null ) != ( m_left == null ) ) { return null ; } if ( ( expr . m_right == null ) != ( m_right == null ) ) { return null ; } if ( ( expr . m_args == null ) != ( m_args == null ) ) { return null ; } List < AbstractExpression > leftBindings = null ; if ( m_left != null ) { leftBindings = m_left . bindingToIndexedExpression ( expr . m_left ) ; if ( leftBindings == null ) { return null ; } } List < AbstractExpression > rightBindings = null ; if ( m_right != null ) { rightBindings = m_right . bindingToIndexedExpression ( expr . m_right ) ; if ( rightBindings == null ) { return null ; } } List < AbstractExpression > argBindings = null ; if ( m_args != null ) { if ( m_args . size ( ) != expr . m_args . size ( ) ) { return null ; } argBindings = new ArrayList < > ( ) ; int ii = 0 ; for ( AbstractExpression rhs : expr . m_args ) { AbstractExpression lhs = m_args . get ( ii ++ ) ; List < AbstractExpression > moreBindings = lhs . bindingToIndexedExpression ( rhs ) ; if ( moreBindings == null ) { return null ; } argBindings . addAll ( moreBindings ) ; } } List < AbstractExpression > result = new ArrayList < > ( ) ; if ( leftBindings != null ) { result . addAll ( leftBindings ) ; } if ( rightBindings != null ) { result . addAll ( rightBindings ) ; } if ( argBindings != null ) { result . addAll ( argBindings ) ; } return result ; }
strict expression equality that didn t involve parameters .
32,452
/**
 * Serialize parallel lists of sort expressions and (optionally) sort
 * directions as a JSON array under the SORT_COLUMNS key of the current
 * object. The stringer must be in object state, not array state.
 */
public static void toJSONArrayFromSortList(JSONStringer stringer,
        List<AbstractExpression> sortExpressions,
        List<SortDirectionType> sortDirections) throws JSONException {
    stringer.key(SortMembers.SORT_COLUMNS).array();
    for (int idx = 0; idx < sortExpressions.size(); ++idx) {
        stringer.object();
        stringer.key(SortMembers.SORT_EXPRESSION).object();
        sortExpressions.get(idx).toJSONString(stringer);
        stringer.endObject();
        // Directions are optional; omit the key when the caller passed none.
        if (sortDirections != null) {
            stringer.keySymbolValuePair(SortMembers.SORT_DIRECTION,
                    sortDirections.get(idx).toString());
        }
        stringer.endObject();
    }
    stringer.endArray();
}
Given a JSONStringer and a sequence of sort expressions and directions serialize the sort expressions . These will be in an array which is the value of SortMembers . SORT_COLUMNS in the current object of the JSONString . The JSONString should be in object state not array state .
32,453
/**
 * Inverse of toJSONArrayFromSortList: fill the two output lists from the
 * SORT_COLUMNS array in jobj (if present), clearing them first. Directions
 * are only collected when a non-null list is supplied and the entry carries
 * a SORT_DIRECTION key.
 */
public static void loadSortListFromJSONArray(List<AbstractExpression> sortExpressions,
        List<SortDirectionType> sortDirections, JSONObject jobj) throws JSONException {
    if (jobj.has(SortMembers.SORT_COLUMNS)) {
        sortExpressions.clear();
        if (sortDirections != null) {
            sortDirections.clear();
        }
        final JSONArray columns = jobj.getJSONArray(SortMembers.SORT_COLUMNS);
        for (int idx = 0; idx < columns.length(); ++idx) {
            final JSONObject entry = columns.getJSONObject(idx);
            sortExpressions.add(fromJSONChild(entry, SortMembers.SORT_EXPRESSION));
            if (sortDirections != null && entry.has(SortMembers.SORT_DIRECTION)) {
                sortDirections.add(SortDirectionType.get(
                        entry.getString(SortMembers.SORT_DIRECTION)));
            }
        }
    }
    assert (sortDirections == null || sortExpressions.size() == sortDirections.size());
}
Load two lists from a JSONObject . One list is for sort expressions and the other is for sort directions . The lists are cleared before they are filled in . This is the inverse of toJSONArrayFromSortList .
32,454
/**
 * Load a list of expressions from the JSON array stored under label in
 * parent, resolving TVE table/column info via tableScan. Returns null when
 * the label is absent or null.
 */
public static List<AbstractExpression> loadFromJSONArrayChild(List<AbstractExpression> starter,
        JSONObject parent, String label, StmtTableScan tableScan) throws JSONException {
    if (parent.isNull(label)) {
        return null;
    }
    final JSONArray childArray = parent.getJSONArray(label);
    return loadFromJSONArray(starter, childArray, tableScan);
}
For TVEs, only the column index and table index are serialized. In order to match expressions, more information is needed to recover the table name, table alias, and column name. By adding
32,455
/**
 * Recursively replace any subexpression that appears as a key in
 * aggTableIndexMap with an equivalent TupleValueExpression built from the
 * mapped column info (preserving the origin statement id for TVEs and the
 * aggregate flag when aggregates are nested underneath). Along paths where
 * nothing changed, the original node is returned as-is; otherwise a clone is
 * rebuilt with the replaced children — note the identity (!=) comparisons
 * used to detect change.
 */
public AbstractExpression replaceWithTVE ( Map < AbstractExpression , Integer > aggTableIndexMap , Map < Integer , ParsedColInfo > indexToColumnMap ) { Integer ii = aggTableIndexMap . get ( this ) ; if ( ii != null ) { ParsedColInfo col = indexToColumnMap . get ( ii ) ; TupleValueExpression tve = new TupleValueExpression ( col . m_tableName , col . m_tableAlias , col . m_columnName , col . m_alias , this , ii ) ; if ( this instanceof TupleValueExpression ) { tve . setOrigStmtId ( ( ( TupleValueExpression ) this ) . getOrigStmtId ( ) ) ; } if ( hasAnySubexpressionOfClass ( AggregateExpression . class ) ) { tve . setHasAggregate ( true ) ; } return tve ; } AbstractExpression lnode = null ; AbstractExpression rnode = null ; if ( m_left != null ) { lnode = m_left . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; } if ( m_right != null ) { rnode = m_right . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; } ArrayList < AbstractExpression > newArgs = null ; boolean changed = false ; if ( m_args != null ) { newArgs = new ArrayList < > ( ) ; for ( AbstractExpression expr : m_args ) { AbstractExpression ex = expr . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; newArgs . add ( ex ) ; if ( ex != expr ) { changed = true ; } } } if ( m_left != lnode || m_right != rnode || changed ) { AbstractExpression resExpr = clone ( ) ; resExpr . setLeft ( lnode ) ; resExpr . setRight ( rnode ) ; resExpr . setArgs ( newArgs ) ; return resExpr ; } return this ; }
This function recursively replaces any subexpression matching an entry in aggTableIndexMap with an equivalent TVE . Its column index and alias are also built up here .
32,456
/** Recursively search the tree rooted at this for any node matching pred. */
public boolean hasAnySubexpressionWithPredicate(SubexprFinderPredicate pred) {
    if (pred.matches(this)
            || (m_left != null && m_left.hasAnySubexpressionWithPredicate(pred))
            || (m_right != null && m_right.hasAnySubexpressionWithPredicate(pred))) {
        return true;
    }
    if (m_args == null) {
        return false;
    }
    for (AbstractExpression arg : m_args) {
        if (arg.hasAnySubexpressionWithPredicate(pred)) {
            return true;
        }
    }
    return false;
}
Searches the expression tree rooted at this for nodes for which pred evaluates to true .
32,457
/**
 * Patch an untyped NUMERIC constant operand to a concrete type: DECIMAL when
 * the context demands it, FLOAT otherwise. No-op for already-typed operands.
 */
void refineOperandType(VoltType valueType) {
    if (m_valueType != VoltType.NUMERIC) {
        return; // only untyped NUMERIC constants need refinement
    }
    final VoltType refined =
            (valueType == VoltType.DECIMAL) ? VoltType.DECIMAL : VoltType.FLOAT;
    m_valueType = refined;
    m_valueSize = refined.getLengthInBytesForFixedTypes();
}
Helper function to patch up NUMERIC typed constant operands and the functions and operators that they parameterize .
32,458
/**
 * Recursive step of finalizeValueTypes over all children, folding each
 * child's content-determinism message into this node's state.
 */
protected final void finalizeChildValueTypes() {
    for (AbstractExpression child : new AbstractExpression[] { m_left, m_right }) {
        if (child != null) {
            child.finalizeValueTypes();
            updateContentDeterminismMessage(child.getContentDeterminismMessage());
        }
    }
    if (m_args != null) {
        for (AbstractExpression arg : m_args) {
            arg.finalizeValueTypes();
            updateContentDeterminismMessage(arg.getContentDeterminismMessage());
        }
    }
}
Do the recursive part of finalizeValueTypes as requested . Note that this updates the content non - determinism state .
32,459
/** Recursively resolve TVEs and function expressions in all children against table. */
protected final void resolveChildrenForTable(Table table) {
    for (AbstractExpression child : new AbstractExpression[] { m_left, m_right }) {
        if (child != null) {
            child.resolveForTable(table);
        }
    }
    if (m_args != null) {
        for (AbstractExpression arg : m_args) {
            arg.resolveForTable(table);
        }
    }
}
Walk the expression tree resolving TVEs and function expressions as we go .
32,460
/**
 * Validate that this expression may appear in an index definition or in a
 * materialized view's GROUP BY / WHERE clause. On failure, append the tail
 * of an error message to msg (which the caller primes with the index name)
 * and return false.
 */
public boolean isValidExprForIndexesAndMVs(StringBuffer msg, boolean isMV) {
    if (containsFunctionById(FunctionSQL.voltGetCurrentTimestampId())) {
        msg.append("cannot include the function NOW or CURRENT_TIMESTAMP.");
        return false;
    }
    if (hasAnySubexpressionOfClass(AggregateExpression.class)) {
        msg.append("cannot contain aggregate expressions.");
        return false;
    }
    if (hasAnySubexpressionOfClass(AbstractSubqueryExpression.class)) {
        // Word the subquery complaint differently for MVs vs. indexes.
        msg.append(isMV ? "cannot contain subquery sources." : "cannot contain subqueries.");
        return false;
    }
    if (hasUserDefinedFunctionExpression()) {
        msg.append("cannot contain calls to user defined functions.");
        return false;
    }
    return true;
}
Return true if the given expression is usable as part of an index or an MV's group by and where clause expression. If false, put the tail of an error message in the string buffer. The string buffer will be initialized with the name of the index.
32,461
/**
 * Validate every expression in checkList for use in an index or MV clause.
 * Stops at the first invalid expression, whose error tail has then been
 * appended to msg.
 */
public static boolean validateExprsForIndexesAndMVs(List<AbstractExpression> checkList,
        StringBuffer msg, boolean isMV) {
    boolean allValid = true;
    for (AbstractExpression expr : checkList) {
        if (!expr.isValidExprForIndexesAndMVs(msg, isMV)) {
            allValid = false;
            break;
        }
    }
    return allValid;
}
Return true if the all of the expressions in the list can be part of an index expression or in group by and where clause of MV . As with validateExprForIndexesAndMVs for individual expression the StringBuffer parameter msg contains the name of the index . Error messages should be appended to it .
32,462
/** Recursively scan for a FunctionExpression carrying the given function id. */
private boolean containsFunctionById(int functionId) {
    if (this instanceof AbstractValueExpression) {
        return false; // value leaves cannot contain function calls
    }
    for (AbstractExpression candidate : findAllFunctionSubexpressions()) {
        assert (candidate instanceof FunctionExpression);
        if (((FunctionExpression) candidate).hasFunctionId(functionId)) {
            return true;
        }
    }
    return false;
}
This function will recursively find any function expression with ID functionId . If found return true . Otherwise return false .
32,463
/**
 * Check that this expression's value type can be indexed; on failure append
 * a description of the offending type to msg.
 */
public boolean isValueTypeIndexable(StringBuffer msg) {
    if (m_valueType.isIndexable()) {
        return true;
    }
    msg.append("expression of type " + m_valueType.getName());
    return false;
}
Returns true iff the expression is indexable . If the expression is not indexable expression information gets populated in the msg string buffer passed in .
32,464
/**
 * Check that this expression's value type can back a unique index (it must
 * first be plain-indexable); on failure append a description of the
 * offending type to msg.
 */
public boolean isValueTypeUniqueIndexable(StringBuffer msg) {
    if (!isValueTypeIndexable(msg)) {
        return false;
    }
    if (m_valueType.isUniqueIndexable()) {
        return true;
    }
    msg.append("expression of type " + m_valueType.getName());
    return false;
}
Returns true iff the expression is indexable in a unique index . If the expression is not indexable expression information gets populated in the msg string buffer passed in .
32,465
/**
 * Recursively collect into ops the symbol of every operator in this tree
 * whose ExpressionType is not marked safe for DDL (e.g. for creating
 * materialized views on nonempty tables).
 */
public void findUnsafeOperatorsForDDL(UnsafeOperatorsForDDL ops) {
    if (!m_type.isSafeForDDL()) {
        ops.add(m_type.symbol());
    }
    for (AbstractExpression child : new AbstractExpression[] { m_left, m_right }) {
        if (child != null) {
            child.findUnsafeOperatorsForDDL(ops);
        }
    }
    if (m_args != null) {
        for (AbstractExpression arg : m_args) {
            arg.findUnsafeOperatorsForDDL(ops);
        }
    }
}
Returns true iff this expression is allowable when creating materialized views on nonempty tables . We have marked all the ExpressionType enumerals and all the function id integers which are safe . These are marked statically . So we just recurse through the tree looking at operation types and function types until we find something we don t like . If we get all the way through the search we are happy and return true .
32,466
/**
 * Return the first argument: m_left if set, otherwise the first element of
 * m_args, otherwise null. The asserts document that the two storage styles
 * are mutually exclusive.
 */
public AbstractExpression getFirstArgument() {
    if (m_left != null) {
        assert (m_args == null);
        return m_left;
    }
    if (m_args == null || m_args.isEmpty()) {
        return null;
    }
    assert (m_left == null && m_right == null);
    return m_args.get(0);
}
Ferret out the first argument . This can be m_left or else the first element of m_args .
32,467
/**
 * Hash a password with the digest algorithm dictated by the auth scheme,
 * consistently across callers. Returns null for a null password.
 *
 * Fix: the original caught NoSuchAlgorithmException with printStackTrace()
 * followed by System.exit(-1) — killing the whole process from a utility
 * method. The required digest algorithms ship with every JVM, so the catch
 * is effectively unreachable; throw instead of exiting so the condition is
 * at least diagnosable if it ever occurs.
 */
public static byte[] getHashedPassword(ClientAuthScheme scheme, String password) {
    if (password == null) {
        return null;
    }
    final MessageDigest md;
    try {
        md = MessageDigest.getInstance(ClientAuthScheme.getDigestScheme(scheme));
    } catch (NoSuchAlgorithmException e) {
        // Should be unreachable on any conforming JVM.
        throw new AssertionError("Missing digest algorithm for scheme " + scheme, e);
    }
    return md.digest(password.getBytes(Constants.UTF8ENCODING));
}
Get a hashed password using SHA - 1 in a consistent way .
32,468
/**
 * Open and authenticate a connection to a Volt server, selecting the
 * Kerberos service name when a Subject is supplied and the plain database
 * service otherwise.
 */
public static Object[] getAuthenticatedConnection(String host, String username,
        byte[] hashedPassword, int port, final Subject subject,
        ClientAuthScheme scheme, long timeoutMillis) throws IOException {
    final String service;
    if (subject == null) {
        service = "database";
    } else {
        service = Constants.KERBEROS;
    }
    return getAuthenticatedConnection(service, host, username, hashedPassword,
            port, subject, scheme, null, timeoutMillis);
}
Create a connection to a Volt server and authenticate the connection .
32,469
/** Build the JSON payload written to the snapshot-request ZooKeeper node. */
public JSONObject getJSONObjectForZK() throws JSONException {
    final JSONObject json = new JSONObject();
    json.put(SnapshotUtil.JSON_PATH, m_path);
    json.put(SnapshotUtil.JSON_PATH_TYPE, m_stype.toString());
    json.put(SnapshotUtil.JSON_NONCE, m_nonce);
    json.put(SnapshotUtil.JSON_BLOCK, m_blocking);
    json.put(SnapshotUtil.JSON_FORMAT, m_format.toString());
    // putOpt omits the key entirely when the value is null.
    json.putOpt(SnapshotUtil.JSON_DATA, m_data);
    json.putOpt(SnapshotUtil.JSON_TERMINUS, m_terminus);
    return json;
}
When we write to ZK to request the snapshot generate the JSON which will be written to the node s data .
32,470
/** Produce a sizing report covering every table in the database catalog. */
public static DatabaseSizes getCatalogSizes(Database dbCatalog, boolean isXDCR) {
    final DatabaseSizes sizes = new DatabaseSizes();
    for (Table table : dbCatalog.getTables()) {
        sizes.addTable(getTableSize(table, isXDCR));
    }
    return sizes;
}
Produce a sizing of all significant database objects .
32,471
/**
 * Command-line entry: create the tar file named by sa[0] (replacing any
 * existing file, default blocks-per-record), containing stdin when no
 * further arguments are given, otherwise each named file.
 */
static public void main(String[] sa) throws IOException, TarMalformatException {
    if (sa.length < 1) {
        System.out.println(
                RB.singleton.getString(RB.TARGENERATOR_SYNTAX, DbBackup.class.getName()));
        System.exit(0);
    }
    final TarGenerator tarWriter = new TarGenerator(new File(sa[0]), true, null);
    if (sa.length == 1) {
        // No input files: archive stdin under the entry name "stdin".
        tarWriter.queueEntry("stdin", System.in, 10240);
    } else {
        for (int argIdx = 1; argIdx < sa.length; argIdx++) {
            tarWriter.queueEntry(new File(sa[argIdx]));
        }
    }
    tarWriter.write();
}
Creates specified tar file to contain specified files or stdin using default blocks - per - record and replacing tar file if it already exists .
32,472
/**
 * Read an entire file into a byte array (used to ship catalog/deployment
 * files for UpdateApplicationCatalog).
 *
 * Fix: the original issued a single read(buffer) call, but InputStream.read
 * is not guaranteed to fill the buffer in one call — large files could come
 * back partially populated. Read in a loop until the buffer is full. Also
 * uses try-with-resources instead of a manual finally/close.
 *
 * @param path file to read
 * @return the file's contents (empty array for an empty file)
 * @throws IOException on read failure or unexpected EOF
 */
public static byte[] fileToBytes(File path) throws IOException {
    try (FileInputStream fin = new FileInputStream(path)) {
        final byte[] buffer = new byte[(int) fin.getChannel().size()];
        int offset = 0;
        while (offset < buffer.length) {
            final int n = fin.read(buffer, offset, buffer.length - offset);
            if (n == -1) {
                throw new IOException("Unexpected end of file reading "
                        + path.getAbsolutePath());
            }
            offset += n;
        }
        return buffer;
    }
}
Serialize a file into bytes . Used to serialize catalog and deployment file for UpdateApplicationCatalog on the client .
32,473
/**
 * Nibble-delete system procedure entry point for replicated tables.
 * Thin wrapper: delegates to nibbleDeleteCommon with the final flag true
 * (the replicated-table variant of the shared implementation).
 */
public VoltTable run ( SystemProcedureExecutionContext ctx , String tableName , String columnName , String compStr , VoltTable parameter , long chunksize ) { return nibbleDeleteCommon ( ctx , tableName , columnName , compStr , parameter , chunksize , true ) ; }
Nibble delete procedure for replicated tables
32,474
/**
 * Return the next chunk of converted CSV data as UTF-8 bytes, or null when
 * no more data is available. Rethrows any exception recorded by a converter
 * thread. While converters are still active this blocks on take(); once all
 * converters have finished it polls without blocking so null signals
 * end-of-data. The available-byte counter is decremented for each chunk
 * handed out.
 * NOTE(review): the m_activeConverters check and the subsequent poll/take
 * are not atomic — presumably acceptable to callers, but confirm.
 */
public byte [ ] read ( ) throws IOException { if ( m_exception . get ( ) != null ) { throw m_exception . get ( ) ; } byte bytes [ ] = null ; if ( m_activeConverters . get ( ) == 0 ) { bytes = m_available . poll ( ) ; } else { try { bytes = m_available . take ( ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } if ( bytes != null ) { m_availableBytes . addAndGet ( - 1 * bytes . length ) ; } return bytes ; }
Returns more CSV data in UTF-8 format. Returns null when there is no more data. May block.
32,475
/**
 * Compile a catalog jar from one or more DDL files.
 *
 * @param jarOutputPath where to write the resulting jar
 * @param ddlFilePaths at least one DDL file path
 * @return true on success; false (with a logged error) otherwise
 */
public boolean compileFromDDL(final String jarOutputPath, final String... ddlFilePaths) {
    if (ddlFilePaths.length == 0) {
        compilerLog.error("At least one DDL file is required.");
        return false;
    }
    final List<VoltCompilerReader> readers;
    try {
        readers = DDLPathsToReaderList(ddlFilePaths);
    } catch (VoltCompilerException e) {
        compilerLog.error("Unable to open DDL file.", e);
        return false;
    }
    return compileInternalToFile(jarOutputPath, null, null, readers, null);
}
Compile from a set of DDL files .
32,476
/** Compile a catalog jar from DDL supplied as a single string (via a temp file). */
public boolean compileDDLString(String ddl, String jarPath) {
    final File schemaFile = VoltProjectBuilder.writeStringToTempFile(ddl);
    schemaFile.deleteOnExit();
    return compileFromDDL(jarPath, schemaFile.getPath());
}
Compile from DDL in a single string
32,477
/** Build and write a catalog jar containing only an empty (comment-only) DDL file. */
public boolean compileEmptyCatalog(final String jarOutputPath) {
    final VoltCompilerReader emptyDDL =
            new VoltCompilerStringReader("ddl.sql", m_emptyDDLComment);
    final List<VoltCompilerReader> readers = new ArrayList<>(1);
    readers.add(emptyDDL);
    final InMemoryJarfile jarFile = new InMemoryJarfile();
    try {
        emptyDDL.putInJar(jarFile, "ddl.sql");
    } catch (IOException e) {
        compilerLog.error("Failed to add DDL file to empty in-memory jar.");
        return false;
    }
    return compileInternalToFile(jarOutputPath, null, null, readers, jarFile);
}
Compile empty catalog jar
32,478
/**
 * Debug-only consistency check: recompile the auto-generated DDL stored in
 * the original jar and diff the resulting catalog against the original. If
 * the rebuild itself fails, just log and return. Any non-empty diff crashes
 * the local VoltDB with the offending diff commands — optionally first
 * replaying the rebuild (RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG) so a
 * developer can breakpoint replayFailedCatalogRebuildUnderDebug and step
 * through the faulty roundtrip. Note the second diff is constructed with a
 * different final flag than the first.
 */
private void debugVerifyCatalog ( InMemoryJarfile origJarFile , Catalog origCatalog ) { final VoltCompiler autoGenCompiler = new VoltCompiler ( m_isXDCR ) ; autoGenCompiler . m_classLoader = origJarFile . getLoader ( ) ; List < VoltCompilerReader > autogenReaderList = new ArrayList < > ( 1 ) ; autogenReaderList . add ( new VoltCompilerJarFileReader ( origJarFile , AUTOGEN_DDL_FILE_NAME ) ) ; InMemoryJarfile autoGenJarOutput = new InMemoryJarfile ( ) ; autoGenCompiler . m_currentFilename = AUTOGEN_DDL_FILE_NAME ; Catalog autoGenCatalog = autoGenCompiler . compileCatalogInternal ( null , null , autogenReaderList , autoGenJarOutput ) ; if ( autoGenCatalog == null ) { Log . info ( "Did not verify catalog because it could not be compiled." ) ; return ; } FilteredCatalogDiffEngine diffEng = new FilteredCatalogDiffEngine ( origCatalog , autoGenCatalog , false ) ; String diffCmds = diffEng . commands ( ) ; if ( diffCmds != null && ! diffCmds . equals ( "" ) ) { if ( RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG ) { autoGenCatalog = replayFailedCatalogRebuildUnderDebug ( autoGenCompiler , autogenReaderList , autoGenJarOutput ) ; } diffEng = new FilteredCatalogDiffEngine ( origCatalog , autoGenCatalog , true ) ; diffCmds = diffEng . commands ( ) ; String crashAdvice = "Catalog Verification from Generated DDL failed! " + "VoltDB dev: Consider" + ( RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG ? "" : " setting VoltCompiler.RETRY_FAILED_CATALOG_REBUILD_UNDER_DEBUG = true and" ) + " setting a breakpoint in VoltCompiler.replayFailedCatalogRebuildUnderDebug" + " to debug a replay of the faulty catalog rebuild roundtrip. " ; VoltDB . crashLocalVoltDB ( crashAdvice + "The offending diffcmds were: " + diffCmds ) ; } else { Log . info ( "Catalog verification completed successfuly." ) ; } }
Internal method that takes the generated DDL from the catalog and builds a new catalog . The generated catalog is diffed with the original catalog to verify compilation and catalog generation consistency .
32,479
/**
 * Re-run the failed catalog rebuild so a developer can breakpoint here and
 * debug the faulty roundtrip post-mortem. PURPOSELY duplicates the mainline
 * call in debugVerifyCatalog — keep the two call sites in sync.
 */
private Catalog replayFailedCatalogRebuildUnderDebug ( VoltCompiler autoGenCompiler , List < VoltCompilerReader > autogenReaderList , InMemoryJarfile autoGenJarOutput ) { Catalog autoGenCatalog = autoGenCompiler . compileCatalogInternal ( null , null , autogenReaderList , autoGenJarOutput ) ; return autoGenCatalog ; }
Take two steps back to retry and potentially debug a catalog rebuild that generated an unintended change . This code is PURPOSELY redundant with the mainline call in debugVerifyCatalog above . Keep the two calls in synch and only redirect through this function in the post - mortem replay after the other call created a flawed catalog .
32,480
/**
 * Build per-statement explain-plan text files (keyed "Proc_Stmt.txt") to be
 * stored in the catalog jar.
 *
 * Improvements: builds each entry with a StringBuilder instead of repeated
 * String += concatenation.
 * NOTE(review): the 'catalog' parameter is unused — the member m_catalog is
 * consulted instead, exactly as in the original. Confirm whether the
 * parameter was ever meant to be used before removing or wiring it in.
 */
HashMap<String, byte[]> getExplainPlans(Catalog catalog) {
    final HashMap<String, byte[]> retval = new HashMap<>();
    final Database db = getCatalogDatabase(m_catalog);
    assert (db != null);
    for (Procedure proc : db.getProcedures()) {
        for (Statement stmt : proc.getStatements()) {
            final StringBuilder sb = new StringBuilder();
            sb.append("SQL: ").append(stmt.getSqltext()).append("\n");
            sb.append("COST: ").append(Integer.toString(stmt.getCost())).append("\n");
            sb.append("PLAN:\n\n");
            sb.append(Encoder.hexDecodeToString(stmt.getExplainplan())).append("\n");
            final byte[] b = sb.toString().getBytes(Constants.UTF8ENCODING);
            retval.put(proc.getTypeName() + "_" + stmt.getTypeName() + ".txt", b);
        }
    }
    return retval;
}
Get textual explain plan info for each plan from the catalog to be shoved into the catalog jarfile .
32,481
/**
 * Core catalog compilation: seed a fresh Catalog with the "cluster" entry
 * (security disabled), compile the database node from the DDL readers —
 * optionally against the previous catalog's database for incremental work —
 * stamp the local epoch on the cluster, and return the catalog.
 * Returns null when DDL compilation throws a VoltCompilerException.
 */
private Catalog compileCatalogInternal ( final VoltCompilerReader cannonicalDDLIfAny , final Catalog previousCatalogIfAny , final List < VoltCompilerReader > ddlReaderList , final InMemoryJarfile jarOutput ) { m_catalog = new Catalog ( ) ; m_catalog . execute ( "add / clusters cluster" ) ; m_catalog . getClusters ( ) . get ( "cluster" ) . setSecurityenabled ( false ) ; try { Database previousDBIfAny = null ; if ( previousCatalogIfAny != null ) { previousDBIfAny = previousCatalogIfAny . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; } compileDatabaseNode ( cannonicalDDLIfAny , previousDBIfAny , ddlReaderList , jarOutput ) ; } catch ( final VoltCompilerException e ) { return null ; } assert ( m_catalog != null ) ; final int epoch = ( int ) ( TransactionIdManager . getEpoch ( ) / 1000 ) ; m_catalog . getClusters ( ) . get ( "cluster" ) . setLocalepoch ( epoch ) ; return m_catalog ; }
Internal method for compiling the catalog .
32,482
/**
 * After DDL processing, load each class named in m_addedClasses via the
 * compiler's class loader and add it to the jar if not already present.
 * m_addedClasses is then narrowed to the classes actually added.
 * @throws VoltCompilerException if a class cannot be loaded or added
 * NOTE(review): the caught exception 'e' is not chained into the thrown
 * VoltCompilerException, losing the root cause — confirm whether that
 * constructor can carry a cause.
 */
private void addExtraClasses ( final InMemoryJarfile jarOutput ) throws VoltCompilerException { List < String > addedClasses = new ArrayList < > ( ) ; for ( String className : m_addedClasses ) { if ( ! jarOutput . containsKey ( className ) ) { try { Class < ? > clz = Class . forName ( className , true , m_classLoader ) ; if ( addClassToJar ( jarOutput , clz ) ) { addedClasses . add ( className ) ; } } catch ( Exception e ) { String msg = "Class %s could not be loaded/found/added to the jar." ; msg = String . format ( msg , className ) ; throw new VoltCompilerException ( msg ) ; } } } m_addedClasses = addedClasses . toArray ( new String [ 0 ] ) ; }
Once the DDL file is over take all of the extra classes found and add them to the jar .
32,483
/** One-shot read: hand off the captured diagnostic detail and clear the capture slot. */
public List<String> harvestCapturedDetail() {
    final List<String> result = m_capturedDiagnosticDetail;
    m_capturedDiagnosticDetail = null;
    return result;
}
Access recent plan output for diagnostic purposes
32,484
/**
 * Build the cache-key prefix encoding the attributes (join order,
 * determinism mode, partitioning) that must match for a cached statement to
 * be reusable. Returns null for inferred partitioning, which is never
 * cacheable this way.
 */
String getKeyPrefix(StatementPartitioning partitioning, DeterminismMode detMode, String joinOrder) {
    if (partitioning.isInferred()) {
        return null;
    }
    final StringBuilder prefix = new StringBuilder("#");
    if (joinOrder != null) {
        prefix.append(joinOrder);
    }
    prefix.append(detMode.toChar());
    prefix.append(partitioning.wasSpecifiedAsSingle() ? "P#" : "R#");
    return prefix.toString();
}
Key prefix includes attributes that make a cached statement usable if they match
32,485
/**
 * Look up a statement cached from the previous catalog under keyPrefix + sql.
 * A hit is returned only when none of the tables the statement reads or
 * updates have been marked dirty; any other outcome counts as a cache miss.
 * Hit/miss counters are maintained as before.
 *
 * Improvement: the two identical dirty-table scan loops (tables read /
 * tables updated) are factored into one private helper.
 */
Statement getCachedStatement(String keyPrefix, String sql) {
    String key = keyPrefix + sql;
    Statement candidate = m_previousCatalogStmts.get(key);
    if (candidate == null
            || anyTableDirty(candidate.getTablesread())
            || anyTableDirty(candidate.getTablesupdated())) {
        ++m_stmtCacheMisses;
        return null;
    }
    ++m_stmtCacheHits;
    return candidate;
}

/** True if any comma-separated table name in csv has been marked dirty. */
private boolean anyTableDirty(String csv) {
    for (String tableName : csv.split(",")) {
        if (isDirtyTable(tableName)) {
            return true;
        }
    }
    return false;
}
Look for a match from the previous catalog that matches the key + sql
32,486
/** Add one (start, end) range pair to the underlying builder; returns this for chaining. */
public HashRangeExpressionBuilder put ( Integer value1 , Integer value2 ) { m_builder . put ( value1 , value2 ) ; return this ; }
Add a value pair .
32,487
/** Materialize a HashRangeExpression from the accumulated ranges and the hash column index. */
public HashRangeExpression build(Integer hashColumnIndex) {
    final HashRangeExpression expr = new HashRangeExpression();
    expr.setRanges(m_builder.build());
    expr.setHashColumnIndex(hashColumnIndex);
    return expr;
}
Generate a hash range expression .
32,488
/**
 * Dequeue the next transaction, but only when the queue is UNBLOCKED (i.e.
 * the head transaction is safe to run); returns null otherwise.
 *
 * Improvement: the original called super.peek() and then super.poll(),
 * discarding poll()'s identical return value; a single poll() returns and
 * removes the head in one step.
 */
public OrderableTransaction poll() {
    updateQueueState();
    if (m_state != QueueState.UNBLOCKED) {
        return null;
    }
    OrderableTransaction retval = super.poll();
    assert (retval != null);
    return retval;
}
Only return transaction state objects that are ready to run .
32,489
/**
 * Add a transaction only when its initiator is known; transactions from
 * unknown initiators are silently dropped. This is the only valid add
 * interface. The queue state is refreshed after a successful add.
 */
public boolean add(OrderableTransaction txnState) {
    if (!m_initiatorData.containsKey(txnState.initiatorHSId)) {
        return false;
    }
    final boolean added = super.add(txnState);
    if (added) {
        updateQueueState();
    }
    return added;
}
Drop data for unknown initiators . This is the only valid add interface .
32,490
/**
 * Record the latest txn id and last-safe txn id reported by an initiator
 * (monotonically — older values never overwrite newer ones), then recompute
 * m_newestCandidateTransaction as the minimum last-seen id across all known
 * initiators and refresh the queue state.
 * Returns the (possibly updated) last-seen txn id for that initiator, or
 * DUMMY_LAST_SEEN_TXN_ID when the initiator is unknown.
 * (The method name retains the historical "Recieved" misspelling; renaming
 * would break callers.)
 */
public long noteTransactionRecievedAndReturnLastSeen ( long initiatorHSId , long txnId , long lastSafeTxnIdFromInitiator ) { assert ( txnId != 0 ) ; if ( m_initiatorData . containsKey ( initiatorHSId ) == false ) { return DtxnConstants . DUMMY_LAST_SEEN_TXN_ID ; } LastInitiatorData lid = m_initiatorData . get ( initiatorHSId ) ; if ( lid . m_lastSeenTxnId < txnId ) lid . m_lastSeenTxnId = txnId ; if ( lid . m_lastSafeTxnId < lastSafeTxnIdFromInitiator ) lid . m_lastSafeTxnId = lastSafeTxnIdFromInitiator ; long min = Long . MAX_VALUE ; for ( LastInitiatorData l : m_initiatorData . values ( ) ) if ( l . m_lastSeenTxnId < min ) min = l . m_lastSeenTxnId ; m_newestCandidateTransaction = min ; updateQueueState ( ) ; return lid . m_lastSeenTxnId ; }
Update the information stored about the latest transaction seen from each initiator . Compute the newest safe transaction id .
32,491
/**
 * Drop a failed initiator: first advance its last-seen txn id to the maximum
 * so no pending transaction waits on it, then remove its tracking entry so
 * heartbeats from it are no longer required.
 */
public void gotFaultForInitiator(long initiatorId) {
    noteTransactionRecievedAndReturnLastSeen(initiatorId, Long.MAX_VALUE,
            DtxnConstants.DUMMY_LAST_SEEN_TXN_ID);
    final LastInitiatorData removed = m_initiatorData.remove(initiatorId);
    assert (removed != null);
}
Remove all pending transactions from the specified initiator and do not require heartbeats from that initiator to proceed .
32,492
/**
 * Make sure the given initiator is tracked (used after a catalog change).
 * @return 1 if a new tracking entry was created, 0 if already known
 */
public int ensureInitiatorIsKnown(long initiatorId) {
    if (m_initiatorData.get(initiatorId) != null) {
        return 0;
    }
    m_initiatorData.put(initiatorId, new LastInitiatorData());
    return 1;
}
After a catalog change double check that all initators in the catalog that are known to be up are here in the RPQ s list .
32,493
/**
 * Return the largest confirmed-safe txn id for the given initiator, or null
 * when the initiator is unknown. Used to decide what to do after an
 * initiator fails.
 */
public Long getNewestSafeTransactionForInitiator(Long initiatorId) {
    final LastInitiatorData data = m_initiatorData.get(initiatorId);
    return (data == null) ? null : data.m_lastSafeTxnId;
}
Return the largest confirmed txn id for the initiator given . Used to figure out what to do after an initiator fails .
32,494
/**
 * Decide whether recovery may start and, if so, at which txn id.
 * Recovery is unsafe until every known initiator has been heard from at
 * least once (a last-seen id still equal to DUMMY means never heard) —
 * otherwise no point in the global txn order can be chosen where all later
 * invocations are available. With an empty queue, recovery may begin at
 * m_newestCandidateTransaction only in the BLOCKED_EMPTY state; the two
 * other blocked states return null (not yet safe), and any other state is
 * logged as unexpected. A non-empty queue recovers at the head's txn id.
 */
public Long safeToRecover ( ) { boolean safe = true ; for ( LastInitiatorData data : m_initiatorData . values ( ) ) { final long lastSeenTxnId = data . m_lastSeenTxnId ; if ( lastSeenTxnId == DtxnConstants . DUMMY_LAST_SEEN_TXN_ID ) { safe = false ; } } if ( ! safe ) { return null ; } OrderableTransaction next = peek ( ) ; if ( next == null ) { if ( m_state == QueueState . BLOCKED_EMPTY ) { return m_newestCandidateTransaction ; } else if ( m_state == QueueState . BLOCKED_SAFETY ) { return null ; } else if ( m_state == QueueState . BLOCKED_ORDERING ) { return null ; } m_recoveryLog . error ( "Unexpected RPQ state " + m_state + " when attempting to start recovery at " + " the source site. Consider killing the recovering node and trying again" ) ; return null ; } else { return next . txnId ; } }
Determine if it is safe to recover and if it is what txnid it is safe to recover at . Recovery is initiated by the recovering source partition . It can t be initiated until the recovering partition has heard from every initiator . This is because it is not possible to pick a point in the global txn ordering for the recovery to start at where all subsequent procedure invocations that need to be applied after recovery are available unless every initiator has been heard from .
32,495
/**
 * Log the request's user out: drop the cached authentication result from
 * the HTTP session (if one exists) and invalidate the session. No-op when
 * session use is globally disabled.
 */
public void unauthenticate(HttpServletRequest request) {
    if (HTTP_DONT_USE_SESSION) {
        return;
    }
    final HttpSession session = request.getSession(false);
    if (session == null) {
        return;
    }
    session.removeAttribute(AUTH_USER_SESSION_KEY);
    session.invalidate();
}
reuses it and happily validates it .
32,496
/**
 * Authenticate an HTTP request, caching the AuthenticationResult in the
 * servlet session when sessions are enabled. Newly created sessions get a
 * bounded inactivity timeout. If session lookup/creation fails the error is
 * rate-limit logged and the request falls back to direct authentication.
 * On a cache miss the request is authenticated explicitly: failures evict
 * any stale session entry and are rate-limit logged; successes are stored
 * in the session for reuse on later requests.
 */
public AuthenticationResult authenticate ( HttpServletRequest request ) { HttpSession session = null ; AuthenticationResult authResult = null ; if ( ! HTTP_DONT_USE_SESSION && ! m_dontUseSession ) { try { session = request . getSession ( ) ; if ( session != null ) { if ( session . isNew ( ) ) { session . setMaxInactiveInterval ( MAX_SESSION_INACTIVITY_SECONDS ) ; } authResult = ( AuthenticationResult ) session . getAttribute ( AUTH_USER_SESSION_KEY ) ; } } catch ( Exception ex ) { session = null ; m_rate_limited_log . log ( EstTime . currentTimeMillis ( ) , Level . ERROR , ex , "Failed to get or create HTTP Session. authenticating user explicitely." ) ; } } if ( authResult == null ) { authResult = getAuthenticationResult ( request ) ; if ( ! authResult . isAuthenticated ( ) ) { if ( session != null ) { session . removeAttribute ( AUTH_USER_SESSION_KEY ) ; } m_rate_limited_log . log ( "JSON interface exception: " + authResult . m_message , EstTime . currentTimeMillis ( ) ) ; } else { if ( session != null ) { session . setAttribute ( AUTH_USER_SESSION_KEY , authResult ) ; } } } return authResult ; }
Look to get session if no session found or created fallback to always authenticate mode .
32,497
/**
 * Construct a FunctionSQL for the given token, consulting the regular
 * function map first and the value-function map second. Returns null for
 * unknown tokens, and for FUNC_VALUE when no domain is in scope; FUNC_VALUE
 * otherwise picks up the current domain as its data type.
 */
public static FunctionSQL newSQLFunction(String token, CompileContext context) {
    int funcId = regularFuncMap.get(token, -1);
    if (funcId == -1) {
        funcId = valueFuncMap.get(token, -1);
    }
    if (funcId == -1) {
        return null;
    }
    final FunctionSQL function = new FunctionSQL(funcId);
    if (funcId == FUNC_VALUE) {
        if (context.currentDomain == null) {
            return null;
        }
        function.dataType = context.currentDomain;
    }
    return function;
}
End of VoltDB extension
32,498
/** Apply the transaction described by hdr/txn to the in-memory data tree; thin delegation. */
public ProcessTxnResult processTxn ( TxnHeader hdr , Record txn ) { return dataTree . processTxn ( hdr , txn ) ; }
the process txn on the data
32,499
/**
 * Stat the node at path on the data tree, passing serverCnxn through for
 * watch bookkeeping.
 * @throws KeeperException.NoNodeException if the node does not exist
 */
public Stat statNode ( String path , ServerCnxn serverCnxn ) throws KeeperException . NoNodeException { return dataTree . statNode ( path , serverCnxn ) ; }
stat the path