idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
30,900
/**
 * Merges the per-key rights in the given map into fullRightsMap, combining
 * grantable rights as well.
 *
 * @param map key -> Right to merge into the accumulated full rights
 */
void addToFullRights(HashMap map) {
    for (Iterator it = map.keySet().iterator(); it.hasNext(); ) {
        Object key = it.next();
        Right incoming = (Right) map.get(key);
        Right existing = (Right) fullRightsMap.get(key);
        if (existing == null) {
            // First sighting of this key: start from a copy of the incoming right.
            existing = incoming.duplicate();
            fullRightsMap.put(key, existing);
        } else {
            existing.add(incoming);
        }
        // Fold in grantable rights, when the incoming right carries any.
        if (incoming.grantableRights == null) {
            continue;
        }
        if (existing.grantableRights == null) {
            existing.grantableRights = incoming.grantableRights.duplicate();
        } else {
            existing.grantableRights.add(incoming.grantableRights);
        }
    }
}
Full or partial rights are added to existing
30,901
/**
 * Rewrites every RIGHT join in this subtree into an equivalent LEFT join by
 * swapping the children and flipping the join type. Recurses bottom-up.
 */
public void toLeftJoin() {
    // Children are either both present or both absent.
    assert ((m_leftNode != null && m_rightNode != null) || (m_leftNode == null && m_rightNode == null));
    if (m_leftNode == null && m_rightNode == null) {
        // Leaf-like node: nothing to rewrite.
        return;
    }
    // Rewrite the subtrees first.
    if (m_leftNode instanceof BranchNode) {
        ((BranchNode) m_leftNode).toLeftJoin();
    }
    if (m_rightNode instanceof BranchNode) {
        ((BranchNode) m_rightNode).toLeftJoin();
    }
    // A RIGHT join is a LEFT join with the operands exchanged.
    if (m_joinType == JoinType.RIGHT) {
        JoinNode swapped = m_rightNode;
        m_rightNode = m_leftNode;
        m_leftNode = swapped;
        m_joinType = JoinType.LEFT;
    }
}
Transform all RIGHT joins from the tree into the LEFT ones by swapping the nodes and their join types
30,902
/**
 * Partitions the join tree into sub-trees of uniform join type. Starting from
 * this node, recurse into each child branch as long as it has the same join
 * type; at the first child of a different type, sever the tree: the child is
 * appended to leafNodes (it becomes the root of the next sub-tree) and is
 * replaced here by a temporary TableLeafNode whose id is the negation of the
 * child's id, so the sub-trees can be matched back up later.
 *
 * @param leafNodes output list collecting the roots of the severed sub-trees
 */
protected void extractSubTree(List<JoinNode> leafNodes) {
    JoinNode[] children = { m_leftNode, m_rightNode };
    for (JoinNode child : children) {
        // Only branch nodes can start a new sub-tree.
        if (!(child instanceof BranchNode)) {
            continue;
        }
        if (((BranchNode) child).m_joinType == m_joinType) {
            // Same join type: continue within the current sub-tree.
            child.extractSubTree(leafNodes);
        } else {
            // Different join type: cut here and leave a placeholder whose
            // negated id links it to the extracted sub-tree root.
            leafNodes.add(child);
            JoinNode tempNode = new TableLeafNode(-child.m_id, child.m_joinExpr, child.m_whereExpr, null);
            if (child == m_leftNode) {
                m_leftNode = tempNode;
            } else {
                m_rightNode = tempNode;
            }
        }
    }
}
Starting from the root, recurse to the children, stopping at the first join node of a different type; sever the tree at that point by replacing the join node with a temporary node whose id matches the join node's id. That join node becomes the root of the next sub-tree.
30,903
/**
 * Reports whether this node or any descendant performs an outer join.
 *
 * @return true if any join in the subtree is non-INNER
 */
public boolean hasOuterJoin() {
    assert (m_leftNode != null && m_rightNode != null);
    if (m_joinType != JoinType.INNER) {
        return true;
    }
    // This node is an inner join; ask the children.
    return m_leftNode.hasOuterJoin() || m_rightNode.hasOuterJoin();
}
Returns true if one of the tree nodes has outer join
30,904
/**
 * Recursively collects the ephemeral-table (sub-query) scans found in this
 * join tree into the supplied list.
 *
 * @param scans output list receiving the discovered scans
 */
public void extractEphemeralTableQueries(List<StmtEphemeralTableScan> scans) {
    // Visit whichever children exist; order (left then right) is preserved.
    JoinNode[] children = { m_leftNode, m_rightNode };
    for (JoinNode child : children) {
        if (child != null) {
            child.extractEphemeralTableQueries(scans);
        }
    }
}
Recursively collects into the given list the ephemeral-table (sub-query) scans contained in this join tree.
30,905
/**
 * Reports whether every join operation in this subtree is an inner join.
 *
 * @return true when this node and all existing children are INNER joins
 */
public boolean allInnerJoins() {
    if (m_joinType != JoinType.INNER) {
        return false;
    }
    // A missing child vacuously satisfies the condition.
    if (m_leftNode != null && !m_leftNode.allInnerJoins()) {
        return false;
    }
    return m_rightNode == null || m_rightNode.allInnerJoins();
}
Returns if all the join operations within this join tree are inner joins .
30,906
/**
 * Applies the order-determinism transform to a compiled plan, but only when
 * stronger determinism is actually required: skipped in FASTER mode, for
 * plans with inherently deterministic statements, and for plans whose root
 * graph is already order-deterministic.
 *
 * Fix: the original read plan.rootPlanGraph into two separate locals
 * (planGraph and root); collapsed to a single local.
 *
 * @param plan the compiled plan to (possibly) rewrite in place
 * @param detMode requested determinism mode
 */
public static void apply(CompiledPlan plan, DeterminismMode detMode) {
    if (detMode == DeterminismMode.FASTER) {
        return; // caller opted out of the stronger guarantee
    }
    if (plan.hasDeterministicStatement()) {
        return; // statement is deterministic by construction
    }
    AbstractPlanNode root = plan.rootPlanGraph;
    if (root.isOrderDeterministic()) {
        return; // plan already yields a deterministic order
    }
    plan.rootPlanGraph = recursivelyApply(root);
}
Only applies when stronger determinism is needed .
30,907
/**
 * Updates the replay sequencer's last-seen and last-polled unique ids from a
 * message. Used on MPI repair. Only messages that carry a unique id are
 * considered: command-log replay transactions and multi-partition sentinels;
 * all other messages are ignored.
 *
 * @param message incoming message; acted on only if it is a replay
 *        TransactionInfoBaseMessage or a MultiPartitionParticipantMessage
 */
public void updateLastSeenUniqueIds(VoltMessage message) {
    long sequenceWithUniqueId = Long.MIN_VALUE;
    // Command-log replay messages carry their original unique id.
    boolean commandLog = (message instanceof TransactionInfoBaseMessage
            && (((TransactionInfoBaseMessage) message).isForReplay()));
    // Sentinels mark multi-partition transaction boundaries during replay.
    boolean sentinel = message instanceof MultiPartitionParticipantMessage;
    if (commandLog || sentinel) {
        // The cast relies on MultiPartitionParticipantMessage being a
        // TransactionInfoBaseMessage subtype — the code assumes this holds.
        sequenceWithUniqueId = ((TransactionInfoBaseMessage) message).getUniqueId();
        m_replaySequencer.updateLastSeenUniqueId(sequenceWithUniqueId, (TransactionInfoBaseMessage) message);
        m_replaySequencer.updateLastPolledUniqueId(sequenceWithUniqueId, (TransactionInfoBaseMessage) message);
    }
}
Update last seen uniqueIds in the replay sequencer . This is used on MPI repair .
30,908
/**
 * Parses one row of a restore result table and folds it into this collection,
 * keyed by (host, partition, table). A row for an already-known key is merged
 * into the existing value; otherwise a new entry is created.
 *
 * @param vt table positioned on the row to parse
 */
public void parseRestoreResultRow(VoltTable vt) {
    RestoreResultKey key = new RestoreResultKey(
            (int) vt.getLong("HOST_ID"),
            (int) vt.getLong("PARTITION_ID"),
            vt.getString("TABLE"));
    boolean succeeded = vt.getString("RESULT").equals("SUCCESS");
    if (containsKey(key)) {
        get(key).mergeData(succeeded, vt.getString("ERR_MSG"));
    } else {
        RestoreResultValue value = new RestoreResultValue(
                (int) vt.getLong("SITE_ID"),
                succeeded,
                vt.getString("HOSTNAME"),
                vt.getString("ERR_MSG"));
        put(key, value);
    }
}
Parse a restore result table row and add to the set .
30,909
/**
 * Binary-searches a naturally ordered list for the given element, delegating
 * to the comparator-based overload with natural ordering.
 *
 * @param list list sorted in natural order
 * @param e element to locate (must be non-null)
 * @param presentBehavior what to return when the key is found
 * @param absentBehavior what to return when the key is missing
 * @return index per the present/absent behaviors
 */
public static <E extends Comparable> int binarySearch(List<? extends E> list, E e,
        KeyPresentBehavior presentBehavior, KeyAbsentBehavior absentBehavior) {
    checkNotNull(e);
    return binarySearch(list, e, Ordering.natural(), presentBehavior, absentBehavior);
}
Searches the specified naturally ordered list for the specified object using the binary search algorithm .
30,910
/**
 * Wraps every task in the collection via {@code wrapTask} and returns the
 * wrapped tasks as an immutable list, preserving iteration order.
 *
 * @param tasks tasks to wrap
 * @return immutable list of wrapped tasks
 */
private final <T> ImmutableList<Callable<T>> wrapTasks(Collection<? extends Callable<T>> tasks) {
    ImmutableList.Builder<Callable<T>> wrapped = ImmutableList.builder();
    for (Callable<T> task : tasks) {
        wrapped.add(wrapTask(task));
    }
    return wrapped.build();
}
Wraps a collection of tasks .
30,911
/**
 * (Re)loads all procedure runners from the given catalog context: default
 * procedures, system procedures, and user procedures. Any failure while
 * loading user procedures crashes the local node.
 *
 * @param catalogContext source of procedure definitions and planner tool
 * @param isInitOrReplay true during initialization or replay, when user
 *        procedures must be loaded directly from the catalog jar rather than
 *        taken from pre-prepared runners
 */
public void loadProcedures(CatalogContext catalogContext, boolean isInitOrReplay) {
    m_defaultProcManager = catalogContext.m_defaultProcs;
    // Drop cached default procedures that reference the previous catalog.
    m_defaultProcCache.clear();
    m_plannerTool = catalogContext.m_ptool;
    m_sysProcs = loadSystemProcedures(catalogContext, m_site);
    try {
        if (isInitOrReplay) {
            // Init/replay path: build runners from the catalog jar's loader.
            m_userProcs = loadUserProcedureRunners(catalogContext.database.getProcedures(),
                    catalogContext.getCatalogJar().getLoader(), null, m_site);
        } else {
            // Normal path: runners were prepared ahead of time.
            m_userProcs = catalogContext.getPreparedUserProcedureRunners(m_site);
        }
    } catch (Exception e) {
        // Unloadable user procedures leave the site unusable: crash this node.
        VoltDB.crashLocalVoltDB("Error trying to load user procedures: " + e.getMessage());
    }
}
Load procedures .
30,912
/**
 * Builds a pattern segment that accepts one ALLOW or PARTITION clause of a
 * CREATE PROCEDURE statement.
 *
 * @param captureTokens whether the interesting tokens are captured as groups
 * @return pattern matching either clause form
 */
private static SQLPatternPart makeInnerProcedureModifierClausePattern(boolean captureTokens) {
    // SPF calls are pure pattern builders, so assembling the alternatives in
    // locals first yields the same pattern as inlining them.
    // ALLOW <user>[, <user> ...]
    SQLPatternPart allowClause =
            SPF.clause(SPF.token("allow"),
                       SPF.group(captureTokens, SPF.commaList(SPF.userName())));
    // PARTITION ON TABLE t COLUMN c [PARAMETER n] [AND ON TABLE t2 COLUMN c2 [PARAMETER n2]]
    SQLPatternPart partitionClause =
            SPF.clause(SPF.token("partition"), SPF.token("on"), SPF.token("table"),
                       SPF.group(captureTokens, SPF.databaseObjectName()),
                       SPF.token("column"),
                       SPF.group(captureTokens, SPF.databaseObjectName()),
                       SPF.optional(SPF.clause(SPF.token("parameter"),
                                               SPF.group(captureTokens, SPF.integer()))),
                       SPF.optional(SPF.clause(SPF.token("and"), SPF.token("on"), SPF.token("table"),
                                               SPF.group(captureTokens, SPF.databaseObjectName()),
                                               SPF.token("column"),
                                               SPF.group(captureTokens, SPF.databaseObjectName()),
                                               SPF.optional(SPF.clause(SPF.token("parameter"),
                                                                       SPF.group(captureTokens, SPF.integer()))))));
    return SPF.oneOf(allowClause, partitionClause);
}
Build a pattern segment to accept a single optional ALLOW or PARTITION clause to modify CREATE PROCEDURE statements .
30,913
/**
 * Builds a pattern segment that recognizes (without parsing out the details)
 * any run of ALLOW/PARTITION modifier clauses of a CREATE PROCEDURE
 * statement, capturing the whole run as one group.
 */
static SQLPatternPart unparsedProcedureModifierClauses() {
    // Non-capturing inner pattern; the outer capture grabs the full text.
    SQLPatternPart repeated = SPF.repeat(makeInnerProcedureModifierClausePattern(false));
    return SPF.capture(repeated).withFlags(SQLPatternFactory.ADD_LEADING_SPACE_TO_CHILD);
}
Build a pattern segment to recognize all the ALLOW or PARTITION modifier clauses of a CREATE PROCEDURE statement .
30,914
/**
 * Builds a pattern segment that accepts one EXPORT or PARTITION clause of a
 * CREATE STREAM statement.
 *
 * @param captureTokens whether the interesting tokens are captured as groups
 * @return pattern matching either clause form
 */
private static SQLPatternPart makeInnerStreamModifierClausePattern(boolean captureTokens) {
    // EXPORT TO TARGET <name>
    SQLPatternPart exportClause =
            SPF.clause(SPF.token("export"), SPF.token("to"), SPF.token("target"),
                       SPF.group(captureTokens, SPF.databaseObjectName()));
    // PARTITION ON COLUMN <name>
    SQLPatternPart partitionClause =
            SPF.clause(SPF.token("partition"), SPF.token("on"), SPF.token("column"),
                       SPF.group(captureTokens, SPF.databaseObjectName()));
    return SPF.oneOf(exportClause, partitionClause);
}
Build a pattern segment to accept a single optional EXPORT or PARTITION clause to modify CREATE STREAM statements .
30,915
/**
 * Builds a pattern segment that recognizes (without parsing out the details)
 * any run of EXPORT/PARTITION modifier clauses of a CREATE STREAM statement,
 * capturing the whole run as one group.
 */
private static SQLPatternPart unparsedStreamModifierClauses() {
    // Non-capturing inner pattern; the outer capture grabs the full text.
    SQLPatternPart repeated = SPF.repeat(makeInnerStreamModifierClausePattern(false));
    return SPF.capture(repeated).withFlags(SQLPatternFactory.ADD_LEADING_SPACE_TO_CHILD);
}
Build a pattern segment to recognize all the EXPORT or PARTITION modifier clauses of a CREATE STREAM statement .
30,916
/**
 * Splits the parameter text of an EXEC statement into individual parameter
 * strings separated by whitespace and/or commas. Single-quoted string
 * literals that contain separator characters are protected from the split by
 * temporarily replacing them with a placeholder, then restored afterwards.
 * Comments, to the extent they are supported, have already been stripped.
 *
 * @param paramText raw text following the procedure name
 * @return parameter strings in their original order
 */
private static List<String> parseExecParameters(String paramText) {
    final String SafeParamStringValuePattern = "#(SQL_PARSER_SAFE_PARAMSTRING)";
    ArrayList<String> originalString = new ArrayList<>();
    Matcher stringMatcher = SingleQuotedString.matcher(paramText);
    StringBuilder safeText = new StringBuilder();
    // Pass 1: copy text, swapping out quoted strings containing separators.
    while (stringMatcher.find()) {
        safeText.append(paramText.substring(0, stringMatcher.start()));
        String asMatched = stringMatcher.group();
        if (SingleQuotedStringContainingParameterSeparators.matcher(asMatched).matches()) {
            // This literal would be broken by the split; save it and
            // substitute the placeholder.
            originalString.add(asMatched);
            safeText.append(SafeParamStringValuePattern);
        } else {
            safeText.append(asMatched);
        }
        // Restart the scan on the remainder of the text.
        paramText = paramText.substring(stringMatcher.end());
        stringMatcher = SingleQuotedString.matcher(paramText);
    }
    safeText.append(paramText);
    // Pass 2: split on separators, then restore the protected literals.
    ArrayList<String> params = new ArrayList<>();
    int subCount = 0;
    int neededSubs = originalString.size();
    String[] split = safeText.toString().split("[\\s,]+");
    for (String fragment : split) {
        if (fragment.isEmpty()) {
            continue; // artifact of a leading separator
        }
        if (subCount < neededSubs) {
            // NOTE(review): replace() substitutes every placeholder in the
            // fragment with the SAME saved literal but advances subCount only
            // once per pass — the assert below would fire if two different
            // protected literals ever landed in one fragment; confirm the
            // grammar makes that impossible.
            while (fragment.indexOf(SafeParamStringValuePattern) > -1) {
                fragment = fragment.replace(SafeParamStringValuePattern, originalString.get(subCount));
                ++subCount;
            }
        }
        params.add(fragment);
    }
    assert (subCount == neededSubs);
    return params;
}
Splits the EXEC parameter text into individual parameter strings; to the extent that comments are supported, they have already been stripped out.
30,917
/**
 * Parses a RECALL statement for sqlcmd.
 *
 * @param statement candidate command text
 * @param lineMax highest valid zero-based history line index
 * @return results holding the zero-based line to recall, or an error message
 *         for a malformed RECALL; null if the statement is not RECALL at all
 */
public static ParseRecallResults parseRecallStatement(String statement, int lineMax) {
    Matcher matcher = RecallToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        String lineNumberText = matcher.group(2);
        String error;
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            // Anything beyond the line number (other than the final ';') is junk.
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            if (trailings.equals(";")) {
                try {
                    // User-visible line numbers are 1-based; convert to 0-based.
                    int line = Integer.parseInt(lineNumberText) - 1;
                    if (line < 0 || line > lineMax) {
                        // Report out-of-range the same way as non-numeric input.
                        throw new NumberFormatException();
                    }
                    return new ParseRecallResults(line);
                } catch (NumberFormatException e) {
                    error = "Invalid RECALL line number argument: '" + lineNumberText + "'";
                }
            } else {
                error = "Invalid RECALL line number argument: '" + lineNumberText + " " + trailings + "'";
            }
        } else if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            error = "Incomplete RECALL command. RECALL expects a line number argument.";
        } else {
            error = "Invalid RECALL command: a space and line number are required after 'recall'";
        }
        return new ParseRecallResults(error);
    }
    return null;
}
Parse RECALL statement for sqlcmd .
30,918
/**
 * Parses a FILE statement for sqlcmd. Supported forms:
 *   FILE name [name ...]         — execute the named files in order
 *   FILE -batch name [name ...]  — execute the named files as batches
 *   FILE -inlinebatch delimiter  — read an inline batch ended by delimiter
 * File names may be single-quoted; a leading '~' expands to the user's home
 * directory.
 *
 * @param parentContext enclosing file context, for nested FILE commands
 * @param statement candidate command text
 * @return FileInfo descriptors for the files/batch, or null if the statement
 *         is not a FILE command at all
 * @throws SQLParser.Exception on a missing delimiter or missing file name
 */
public static List<FileInfo> parseFileStatement(FileInfo parentContext, String statement) {
    Matcher fileMatcher = FileToken.matcher(statement);
    if (!fileMatcher.lookingAt()) {
        return null; // not a FILE command
    }
    String remainder = statement.substring(fileMatcher.end(), statement.length());
    List<FileInfo> filesInfo = new ArrayList<>();
    // "-inlinebatch" takes a delimiter instead of file names.
    Matcher inlineBatchMatcher = DashInlineBatchToken.matcher(remainder);
    if (inlineBatchMatcher.lookingAt()) {
        remainder = remainder.substring(inlineBatchMatcher.end(), remainder.length());
        Matcher delimiterMatcher = DelimiterToken.matcher(remainder);
        if (delimiterMatcher.matches()) {
            String delimiter = delimiterMatcher.group(1);
            filesInfo.add(new FileInfo(parentContext, FileOption.INLINEBATCH, delimiter));
            return filesInfo;
        }
        throw new SQLParser.Exception("Did not find valid delimiter for \"file -inlinebatch\" command.");
    }
    FileOption option = FileOption.PLAIN;
    Matcher batchMatcher = DashBatchToken.matcher(remainder);
    if (batchMatcher.lookingAt()) {
        option = FileOption.BATCH;
        remainder = remainder.substring(batchMatcher.end(), remainder.length());
    }
    remainder = remainder.trim();
    // Tokenize names: bare words or single-quoted strings.
    List<String> filenames = new ArrayList<>();
    Pattern regex = Pattern.compile("[^\\s\']+|'[^']*'");
    Matcher regexMatcher = regex.matcher(remainder);
    while (regexMatcher.find()) {
        filenames.add(regexMatcher.group());
    }
    for (String filename : filenames) {
        Matcher filenameMatcher = FilenameToken.matcher(filename);
        if (filenameMatcher.matches()) {
            // Strip surrounding quotes, then expand a leading '~'.
            filename = filenameMatcher.group(1);
            filename = filename.trim();
            if (filename.startsWith("~")) {
                filename = filename.replaceFirst("~", System.getProperty("user.home"));
            }
            filesInfo.add(new FileInfo(parentContext, option, filename));
        }
    }
    if (filesInfo.size() == 0) {
        String msg = String.format("Did not find valid file name in \"file%s\" command.",
                option == FileOption.BATCH ? " -batch" : "");
        throw new SQLParser.Exception(msg);
    }
    return filesInfo;
}
Parse FILE statement for sqlcmd .
30,919
/**
 * Parses a SHOW or LIST statement for sqlcmd.
 *
 * @param statement candidate command text
 * @return the subcommand text (with any trailing junk appended so the caller
 *         can report it), the bare command-word terminator when no subcommand
 *         was given, or null if this is not a SHOW/LIST statement
 */
public static String parseShowStatementSubcommand(String statement) {
    Matcher matcher = ShowToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            // Re-join any text after the subcommand around the final ';'.
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            if (trailings.equals(";")) {
                // Clean statement: just the subcommand.
                return matcher.group(2);
            }
            // Keep the extra text so the caller can flag it.
            return matcher.group(2) + " " + trailings;
        }
        if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            // Bare SHOW/LIST with no subcommand.
            return commandWordTerminator;
        }
    }
    return null;
}
Parse a SHOW or LIST statement for sqlcmd .
30,920
/**
 * Parses a HELP statement for sqlcmd.
 *
 * @param statement candidate command text
 * @return the help subcommand (with any trailing junk appended), the empty
 *         string when the user typed just HELP, or null if this is not a
 *         HELP statement
 */
public static String parseHelpStatement(String statement) {
    Matcher matcher = HelpToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            // Re-join any text after the subcommand around the final ';'.
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            if (trailings.equals(";")) {
                return matcher.group(2);
            }
            // Keep the extra text so the caller can flag it.
            return matcher.group(2) + " " + trailings;
        }
        if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            // Bare HELP with no subcommand.
            return "";
        }
        return matcher.group(1).trim();
    }
    return null;
}
Parse HELP statement for sqlcmd. The sub-command will be empty if the user just typed HELP.
30,921
/**
 * Extracts the digits from a single-quoted hex literal parameter string.
 *
 * @param paramString candidate parameter text
 * @return just the hex digits when the string matches the literal form,
 *         otherwise null
 */
public static String getDigitsFromHexLiteral(String paramString) {
    Matcher matcher = SingleQuotedHexLiteral.matcher(paramString);
    // group(1) captures the digits inside the quoted literal.
    return matcher.matches() ? matcher.group(1) : null;
}
Given a parameter string, if it is a single-quoted hex literal of the form X'0123456789ABCDEF', return a string containing just the digits; otherwise return null.
30,922
/**
 * Converts a string of 1..16 hex digits to a long, using a two's-complement
 * interpretation (e.g. "FFFFFFFFFFFFFFFF" yields -1).
 *
 * @param hexDigits hex digit string, no prefix or quotes
 * @return the converted value
 * @throws SQLParser.Exception on zero digits or more than 16 digits
 */
public static long hexDigitsToLong(String hexDigits) throws SQLParser.Exception {
    int nDigits = hexDigits.length();
    if (nDigits == 0) {
        throw new SQLParser.Exception("Zero hexadecimal digits is invalid for BIGINT value");
    }
    if (nDigits > 16) {
        throw new SQLParser.Exception("Too many hexadecimal digits for BIGINT value");
    }
    // BigInteger.longValue() keeps the low 64 bits: two's-complement wrap.
    return new BigInteger(hexDigits, 16).longValue();
}
Given a string of hex digits, produce a long value, assuming a two's-complement representation.
30,923
/**
 * Parses an EXECUTE procedure-call statement, validating the procedure name
 * and parameter count against the supplied signature map.
 *
 * @param statement candidate command text
 * @param procedures procedure name -> (param count -> param types); required
 * @return parsed results, or null if the statement is not an EXECUTE command
 * @throws SQLParser.Exception on malformed EXECUTE or signature mismatch
 */
public static ExecuteCallResults parseExecuteCall(String statement,
        Map<String, Map<Integer, List<String>>> procedures) throws SQLParser.Exception {
    // Tests may bypass validation by calling the internal variant directly.
    assert (procedures != null);
    return parseExecuteCallInternal(statement, procedures);
}
Parse EXECUTE procedure call .
30,924
/**
 * Implementation of EXECUTE parsing. When {@code procedures} is non-null the
 * procedure name and parameter count are validated against it; passing null
 * short-circuits the lookup (used for testing).
 *
 * @return parsed results, or null if the statement is not an EXECUTE command
 * @throws SQLParser.Exception on malformed EXECUTE, unknown procedure, or
 *         mismatched parameter count
 */
private static ExecuteCallResults parseExecuteCallInternal(String statement,
        Map<String, Map<Integer, List<String>>> procedures) throws SQLParser.Exception {
    Matcher matcher = ExecuteCallPreamble.matcher(statement);
    if (!matcher.lookingAt()) {
        return null;
    }
    String commandWordTerminator = matcher.group(1);
    if (OneWhitespace.matcher(commandWordTerminator).matches()
            || commandWordTerminator.equals(",")) {
        ExecuteCallResults results = new ExecuteCallResults();
        String rawParams = statement.substring(matcher.end());
        results.params = parseExecParameters(rawParams);
        // The first token is the procedure name; the rest are arguments.
        results.procedure = results.params.remove(0);
        if (procedures == null) {
            // Test path: skip signature validation entirely.
            results.paramTypes = null;
            return results;
        }
        Map<Integer, List<String>> signature = procedures.get(results.procedure);
        if (signature == null) {
            throw new SQLParser.Exception("Undefined procedure: %s", results.procedure);
        }
        // Overloads are keyed by parameter count.
        results.paramTypes = signature.get(results.params.size());
        if (results.paramTypes == null || results.params.size() != results.paramTypes.size()) {
            String expectedSizes = "";
            for (Integer expectedSize : signature.keySet()) {
                expectedSizes += expectedSize + ", ";
            }
            throw new SQLParser.Exception(
                    "Invalid parameter count for procedure: %s (expected: %s received: %d)",
                    results.procedure, expectedSizes, results.params.size());
        }
        return results;
    }
    if (commandWordTerminator.equals(";")) {
        // "exec;" — command word present but no procedure name.
        throw new SQLParser.Exception("Incomplete EXECUTE command. EXECUTE requires a procedure name argument.");
    }
    throw new SQLParser.Exception("Invalid EXECUTE command. unexpected input: '" + commandWordTerminator + "'.");
}
Private implementation of parse EXECUTE procedure call . Also supports short - circuiting procedure lookup for testing .
30,925
/**
 * Checks that a batch starts with an appropriate DDL verb, judging only the
 * first line that is neither blank nor a whole-line comment.
 *
 * @param batch full batch text
 * @return the DDL verdict for the first substantive line; true when the batch
 *         contains nothing but comments and blank lines
 */
public static boolean appearsToBeValidDDLBatch(String batch) {
    BufferedReader reader = new BufferedReader(new StringReader(batch));
    try {
        for (String line = reader.readLine(); line != null; line = reader.readLine()) {
            if (isWholeLineComment(line)) {
                continue;
            }
            String trimmed = line.trim();
            if (trimmed.equals("")) {
                continue;
            }
            // The very first substantive line decides the whole batch.
            return queryIsDDL(trimmed);
        }
    } catch (IOException e) {
        assert (false); // a StringReader cannot actually throw here
    }
    // Only comments/whitespace: nothing objectionable.
    return true;
}
Make sure that the batch starts with an appropriate DDL verb . We do not look further than the first token of the first non - comment and non - whitespace line .
30,926
/**
 * Parses an ECHO statement for sqlcmd.
 *
 * @param statement candidate command text
 * @return the text to echo, the empty string when the user typed just ECHO,
 *         or null if this is not an ECHO statement
 */
public static String parseEchoStatement(String statement) {
    Matcher matcher = EchoToken.matcher(statement);
    if (!matcher.matches()) {
        return null; // not an ECHO command at all
    }
    String terminator = matcher.group(1);
    // A whitespace terminator means argument text follows; echo it verbatim.
    return OneWhitespace.matcher(terminator).matches() ? matcher.group(2) : "";
}
Parse ECHO statement for sqlcmd. The result will be the empty string if the user just typed ECHO.
30,927
/**
 * Parses an ECHOERROR statement for sqlcmd.
 *
 * @param statement candidate command text
 * @return the text to echo to stderr, the empty string when the user typed
 *         just ECHOERROR, or null if this is not an ECHOERROR statement
 */
public static String parseEchoErrorStatement(String statement) {
    Matcher matcher = EchoErrorToken.matcher(statement);
    if (!matcher.matches()) {
        return null; // not an ECHOERROR command at all
    }
    String terminator = matcher.group(1);
    // A whitespace terminator means argument text follows; echo it verbatim.
    return OneWhitespace.matcher(terminator).matches() ? matcher.group(2) : "";
}
Parse ECHOERROR statement for sqlcmd. The result will be the empty string if the user just typed ECHOERROR.
30,928
/**
 * Parses a DESCRIBE (or DESC) statement for sqlcmd.
 *
 * @param statement candidate command text
 * @return the object name (with any trailing junk appended so the caller can
 *         report it), the bare command-word terminator when the user typed
 *         just DESCRIBE/DESC, or null if this is not a DESCRIBE statement
 */
public static String parseDescribeStatement(String statement) {
    Matcher matcher = DescribeToken.matcher(statement);
    if (matcher.matches()) {
        String commandWordTerminator = matcher.group(1);
        if (OneWhitespace.matcher(commandWordTerminator).matches()) {
            // Re-join any text after the object name around the final ';'.
            String trailings = matcher.group(3) + ";" + matcher.group(4);
            if (trailings.equals(";")) {
                // Clean statement: just the object name.
                return matcher.group(2);
            }
            // Keep the extra text so the caller can flag it.
            return matcher.group(2) + " " + trailings;
        }
        if (commandWordTerminator.equals("") || commandWordTerminator.equals(";")) {
            // Bare DESCRIBE/DESC with no argument.
            return commandWordTerminator;
        }
    }
    return null;
}
Parse DESCRIBE statement for sqlcmd. The result will be empty if the user just typed DESCRIBE or DESC.
30,929
/**
 * Resolves the ORDER BY items of a UNION query. Only a simple column name or
 * a 1-based integer column position is allowed; anything else raises X_42576.
 * (Method name keeps its historical "Referneces" typo — renaming would break
 * callers.)
 */
void resolveColumnRefernecesInUnionOrderBy() {
    int orderCount = sortAndSlice.getOrderLength();
    if (orderCount == 0) {
        return; // no ORDER BY to resolve
    }
    String[] unionColumnNames = getColumnNames();
    for (int i = 0; i < orderCount; i++) {
        Expression sort = (Expression) sortAndSlice.exprList.get(i);
        Expression e = sort.getLeftNode();
        if (e.getType() == OpTypes.VALUE) {
            // An integer literal is treated as a 1-based column position.
            if (e.getDataType().typeCode == Types.SQL_INTEGER) {
                int index = ((Integer) e.getValue(null)).intValue();
                if (0 < index && index <= unionColumnNames.length) {
                    // Store the 0-based index on the sort expression.
                    sort.getLeftNode().queryTableColumnIndex = index - 1;
                    continue;
                }
            }
        } else if (e.getType() == OpTypes.COLUMN) {
            // A column reference is matched against the union's output names.
            int index = ArrayUtil.find(unionColumnNames, e.getColumnName());
            if (index >= 0) {
                sort.getLeftNode().queryTableColumnIndex = index;
                continue;
            }
        }
        // Neither a valid position nor a resolvable column name.
        throw Error.error(ErrorCode.X_42576);
    }
    sortAndSlice.prepare(null);
}
Only simple column reference or column position allowed
30,930
/**
 * Installs the given column-name list on the result table, used in views
 * after full type resolution. When no result table has been materialized the
 * request is forwarded down the left branch.
 *
 * @param list the column names to install
 */
public void setTableColumnNames(HashMappedList list) {
    if (resultTable == null) {
        // No materialized table here; delegate to the left expression.
        leftQueryExpression.setTableColumnNames(list);
        return;
    }
    ((TableDerived) resultTable).columnList = list;
}
Used in views after full type resolution
30,931
/**
 * Marks this query expression as the top-level (root) expression.
 * Not for views. Sequence use is illegal at top level and raises X_42598.
 */
public void setAsTopLevel() {
    // Sequences are not permitted in a top-level query expression.
    if (compileContext.getSequences().length != 0) {
        throw Error.error(ErrorCode.X_42598);
    }
    isTopLevel = true;
    setReturningResultSet();
}
Not for views . Only used on root node .
30,932
/**
 * Sets SESSION scope on the QueryExpression that actually creates the result
 * table. For a non-corresponding union the request is forwarded down the
 * left branch.
 */
void setReturningResultSet() {
    if (!unionCorresponding) {
        // The left expression owns the table; let it configure itself.
        leftQueryExpression.setReturningResultSet();
        return;
    }
    persistenceScope = TableBase.SCOPE_SESSION;
    columnMode = TableBase.COLUMNS_UNREFERENCED;
}
Sets the scope to SESSION for the QueryExpression object that creates the table
30,933
/**
 * Schedules a recurring task that prints statistics to the console every
 * {@code displayinterval} seconds. Fixed-delay scheduling re-arms the timer
 * only after each print completes.
 */
public void schedulePeriodicStats() {
    Runnable printer = new Runnable() {
        @Override
        public void run() {
            printStatistics();
        }
    };
    m_scheduler.scheduleWithFixedDelay(printer,
            m_config.displayinterval, m_config.displayinterval, TimeUnit.SECONDS);
}
Add a task to the scheduler to print statistics to the console at regular intervals .
30,934
/**
 * Prints summary performance statistics for the benchmark run: throughput
 * always, plus latency percentiles, server-side latency, and a latency
 * histogram when latency reporting is enabled. Finally writes the summary
 * CSV if a stats file was configured.
 *
 * @throws Exception if fetching statistics or writing the CSV fails
 */
public synchronized void printResults() throws Exception {
    ClientStats stats = m_fullStatsContext.fetch().getStats();
    System.out.print(HORIZONTAL_RULE);
    System.out.println(" Client Workload Statistics");
    System.out.println(HORIZONTAL_RULE);
    System.out.printf("Average throughput:            %,9d txns/sec\n", stats.getTxnThroughput());
    if (m_config.latencyreport) {
        // Client-observed latency distribution.
        System.out.printf("Average latency:               %,9.2f ms\n", stats.getAverageLatency());
        System.out.printf("10th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.1));
        System.out.printf("25th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.25));
        System.out.printf("50th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.5));
        System.out.printf("75th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.75));
        System.out.printf("90th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.9));
        System.out.printf("95th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.95));
        System.out.printf("99th percentile latency:       %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.99));
        System.out.printf("99.5th percentile latency:     %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.995));
        System.out.printf("99.9th percentile latency:     %,9.2f ms\n", stats.kPercentileLatencyAsDouble(.999));
        // Latency as measured inside the server.
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" System Server Statistics");
        System.out.println(HORIZONTAL_RULE);
        System.out.printf("Reported Internal Avg Latency: %,9.2f ms\n", stats.getAverageInternalLatency());
        System.out.print("\n" + HORIZONTAL_RULE);
        System.out.println(" Latency Histogram");
        System.out.println(HORIZONTAL_RULE);
        System.out.println(stats.latencyHistoReport());
    }
    // Persist the summary for post-processing (no-op if no file configured).
    m_client.writeSummaryCSV(stats, m_config.statsfile);
}
Prints some summary statistics about performance .
30,935
/**
 * Performs an orderly shutdown: stops the stats scheduler, waits for it to
 * terminate, then drains outstanding work and closes the client.
 *
 * Fix: the original caught InterruptedException without restoring the
 * thread's interrupt status; it is now re-asserted so callers can observe
 * the interruption.
 */
private void shutdown() {
    m_scheduler.shutdown();
    try {
        m_scheduler.awaitTermination(60, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        // Preserve the interrupt for code further up the stack.
        Thread.currentThread().interrupt();
        e.printStackTrace();
    }
    try {
        m_client.drain();
        m_client.close();
    } catch (IOException | InterruptedException e) {
        if (e instanceof InterruptedException) {
            // Preserve the interrupt for code further up the stack.
            Thread.currentThread().interrupt();
        }
        e.printStackTrace();
    }
}
Perform various tasks to end the demo cleanly .
30,936
/**
 * Invokes the GetHighestBidForLocation stored procedure with a random device
 * id and a random point; the procedure returns the id of the highest-dollar
 * bid for that location.
 *
 * Fix: the original used Math.abs(nextLong()) % NUM_DEVICES, which yields a
 * negative device id when nextLong() returns Long.MIN_VALUE (Math.abs of
 * MIN_VALUE is still negative). Math.floorMod always produces a value in
 * [0, NUM_DEVICES).
 */
private void requestAd() {
    long deviceId = Math.floorMod(m_rand.nextLong(), AdBrokerBenchmark.NUM_DEVICES);
    GeographyPointValue point = getRandomPoint();
    try {
        // Fire-and-forget: results are not inspected by this client.
        m_client.callProcedure(new NullCallback(), "GetHighestBidForLocation", deviceId, point);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Invoke the stored procedure GetHighestBidForLocation which given a random point returns the id of the bid that has the highest dollar amount .
30,937
/**
 * Promotes this scan's single-partitioning expression to the parent level by
 * merging it into the value-equivalence map and the equality sets. Applies
 * only when the scan involves partitioned tables and runs as one fragment.
 *
 * @param valueEquivalence expression -> set of equivalent expressions (updated)
 * @param eqSets collection of equivalence sets (updated)
 */
public void promoteSinglePartitionInfo(
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set<Set<AbstractExpression>> eqSets) {
    assert (getScanPartitioning() != null);
    // Nothing to promote for replicated-only or two-fragment (MP) plans.
    if (getScanPartitioning().getCountOfPartitionedTables() == 0
            || getScanPartitioning().requiresTwoFragments()) {
        return;
    }
    AbstractExpression spExpr = getScanPartitioning().singlePartitioningExpression();
    for (SchemaColumn col : m_partitioningColumns) {
        AbstractExpression tveKey = col.getExpression();
        assert (tveKey instanceof TupleValueExpression);
        // Find an equivalence set to join: keyed by this column, by the
        // partitioning expression, or by any sibling partitioning column.
        Set<AbstractExpression> values = null;
        if (valueEquivalence.containsKey(tveKey)) {
            values = valueEquivalence.get(tveKey);
        } else if (valueEquivalence.containsKey(spExpr)) {
            values = valueEquivalence.get(spExpr);
        } else {
            for (SchemaColumn otherCol : m_partitioningColumns) {
                if (col != otherCol && valueEquivalence.containsKey(otherCol.getExpression())) {
                    values = valueEquivalence.get(otherCol.getExpression());
                    break;
                }
            }
            if (values == null) {
                // No related set exists yet; start a fresh one.
                values = new HashSet<>();
            }
        }
        updateEqualSets(values, valueEquivalence, eqSets, tveKey, spExpr);
    }
}
Promote the single-partitioning expression to the parent level, adding the information to the equality sets and the input value equivalence.
30,938
/**
 * Adds the partitioning column expression and the single-partitioning
 * expression to the same equivalence set and registers the set under both
 * keys. The set is removed from eqSets before mutation and re-added after,
 * because HashSet caches the hash code computed at insertion time: mutating
 * a contained (non-final-hash) set in place would leave eqSets holding a
 * stale hash and make the set unfindable.
 *
 * @param values the equivalence set being extended (mutated)
 * @param valueEquivalence expression -> equivalence set index (updated)
 * @param eqSets collection of equivalence sets (updated)
 * @param tveKey the partitioning column's tuple value expression
 * @param spExpr the single-partitioning expression
 */
private void updateEqualSets(Set<AbstractExpression> values,
        HashMap<AbstractExpression, Set<AbstractExpression>> valueEquivalence,
        Set<Set<AbstractExpression>> eqSets,
        AbstractExpression tveKey, AbstractExpression spExpr) {
    boolean hasLegacyValues = false;
    if (eqSets.contains(values)) {
        // Take the set out while its contents (and hash) change.
        eqSets.remove(values);
        hasLegacyValues = true;
    }
    values.add(spExpr);
    values.add(tveKey);
    if (hasLegacyValues) {
        // Re-insert so eqSets records the fresh hash.
        eqSets.add(values);
    }
    valueEquivalence.put(spExpr, values);
    valueEquivalence.put(tveKey, values);
}
Re-inserts the mutated set because HashSet caches the hash code computed at insertion time for the (non-final) contained object.
30,939
/**
 * Reports whether this subquery is replicated: true only when every table in
 * its FROM clause is replicated.
 *
 * @return false as soon as any scanned table is partitioned
 */
public boolean getIsReplicated() {
    for (StmtTableScan tableScan : m_subqueryStmt.allScans()) {
        if (!tableScan.getIsReplicated()) {
            // One partitioned table makes the whole subquery partitioned.
            return false;
        }
    }
    return true;
}
The subquery is replicated if all tables from the FROM clause defining this subquery are replicated
30,940
/**
 * Builds a tuple value expression referencing the column this subquery
 * produces at the given position. Both table name/alias and column
 * name/alias use the subquery's aliases.
 *
 * @param index 0-based output column position
 * @return the TVE addressing that output column
 */
public TupleValueExpression getOutputExpression(int index) {
    SchemaColumn schemaCol = getSchemaColumn(index);
    String tableAlias = getTableAlias();
    String columnAlias = schemaCol.getColumnAlias();
    return new TupleValueExpression(tableAlias, tableAlias, columnAlias, columnAlias, index);
}
Produce a tuple value expression for a column produced by this subquery
30,941
/**
 * Builds the upper- or lower-bound comparison implied by a prefix LIKE
 * pattern: leftExpr {rangeComparator} 'comparand'.
 *
 * @param leftExpr the expression being filtered
 * @param rangeComparator GTE/LT-style comparison for the bound
 * @param comparand string constant forming the bound
 * @return the comparison expression for the range filter
 */
static private ComparisonExpression rangeFilterFromPrefixLike(AbstractExpression leftExpr,
        ExpressionType rangeComparator, String comparand) {
    ConstantValueExpression bound = new ConstantValueExpression();
    bound.setValueType(VoltType.STRING);
    bound.setValue(comparand);
    // NOTE(review): size is taken from the character count — confirm char vs
    // byte semantics for multi-byte comparands.
    bound.setValueSize(comparand.length());
    return new ComparisonExpression(rangeComparator, leftExpr, bound);
}
Construct the upper or lower bound expression that is implied by a prefix LIKE operator given its required elements .
30,942
/**
 * Rewrites the table name and alias on every schema column and every helper
 * map key, in place; column names and aliases are preserved.
 * NOTE(review): this mutates the KEYS of m_columnsMapHelper in place — if
 * SchemaColumn's hashCode/equals depend on the table name this would corrupt
 * the map; confirm they are table-name-independent.
 *
 * @param tbName the new table name
 * @param tbAlias the new table alias
 * @return this schema, for call chaining
 */
public NodeSchema resetTableName(String tbName, String tbAlias) {
    m_columns.forEach(sc -> sc.reset(tbName, tbAlias, sc.getColumnName(), sc.getColumnAlias()));
    m_columnsMapHelper.forEach((k, v) -> k.reset(tbName, tbAlias, k.getColumnName(), k.getColumnAlias()));
    return this;
}
Substitute table name only for all schema columns and map entries
30,943
/**
 * Appends a column to this schema, recording its position in the lookup map.
 *
 * @param column the column to append
 */
public void addColumn(SchemaColumn column) {
    // The helper map stores the column's index for O(1) lookups by column.
    m_columnsMapHelper.put(column, m_columns.size());
    m_columns.add(column);
}
Add a column to this schema .
30,944
/**
 * Looks up the SchemaColumn matching the given name/alias quadruple.
 *
 * @return the matching column, or null when the schema has no such column
 */
public SchemaColumn find(String tableName, String tableAlias, String columnName, String columnAlias) {
    // Build a probe column and search by the schema's own matching rules.
    SchemaColumn probe = new SchemaColumn(tableName, tableAlias, columnName, columnAlias);
    int index = findIndexOfColumn(probe);
    return (index == -1) ? null : m_columns.get(index);
}
Retrieve the SchemaColumn that matches the provided arguments .
30,945
/**
 * Sorts the columns in [fromIndex, toIndex) by TVE index, in place. All
 * elements in the range must be TupleValueExpressions.
 *
 * @param fromIndex start of the range, inclusive
 * @param toIndex end of the range, exclusive
 */
void sortByTveIndex(int fromIndex, int toIndex) {
    // subList is a live view, so sorting it reorders m_columns itself.
    m_columns.subList(fromIndex, toIndex).sort(TVE_IDX_COMPARE);
}
Sort a sub - range of the schema columns by TVE index . All elements must be TupleValueExpressions . Modification is made in - place .
30,946
/**
 * Compares this schema to another by column names only; the differentiator
 * field is deliberately ignored.
 *
 * @param otherSchema schema to compare against (may be null)
 * @return true when both schemas have the same length and pairwise-equal names
 */
public boolean equalsOnlyNames(NodeSchema otherSchema) {
    if (otherSchema == null || otherSchema.size() != size()) {
        return false;
    }
    for (int colIndex = 0; colIndex < size(); ++colIndex) {
        // compareNames() == 0 means the name fields agree.
        if (otherSchema.getColumn(colIndex).compareNames(m_columns.get(colIndex)) != 0) {
            return false;
        }
    }
    return true;
}
Returns true if the column names of the two schemas are the same; the differentiator field is ignored.
30,947
/**
 * Returns a copy of this schema in which every non-TVE expression has been
 * replaced by an appropriate TVE. Used mainly when deriving a node's output
 * schema from its children's schemas: the columns carry over, the non-TVE
 * expressions do not.
 *
 * @return the converted copy
 */
NodeSchema copyAndReplaceWithTVE() {
    NodeSchema converted = new NodeSchema();
    // The replacement TVE's index is the column's position in this schema.
    for (int colIndex = 0; colIndex < m_columns.size(); ++colIndex) {
        converted.addColumn(m_columns.get(colIndex).copyAndReplaceWithTVE(colIndex));
    }
    return converted;
}
Returns a copy of this NodeSchema but with all non - TVE expressions replaced with an appropriate TVE . This is used primarily when generating a node s output schema based on its childrens schema ; we want to carry the columns across but leave any non - TVE expressions behind .
30,948
/**
 * Modifies this schema so its columns can accommodate both its own values
 * and those of otherSchema.  otherSchema is never modified.
 *
 * @param otherSchema    the schema to harmonize against, column by column
 * @param schemaKindName used in error messages to identify this schema
 * @return true if any column type or size of this schema was changed
 * @throws PlanningErrorException when the schemas have different lengths or
 *         a column pair has no common representable type
 */
public boolean harmonize(NodeSchema otherSchema, String schemaKindName) {
    if (size() != otherSchema.size()) {
        // Fixed: the message was missing the space before "schema".
        throw new PlanningErrorException("The " + schemaKindName +
                " schema and the statement output schemas have different lengths.");
    }
    boolean changedSomething = false;
    for (int idx = 0; idx < size(); idx += 1) {
        SchemaColumn myColumn = getColumn(idx);
        SchemaColumn otherColumn = otherSchema.getColumn(idx);
        VoltType myType = myColumn.getValueType();
        VoltType otherType = otherColumn.getValueType();
        // Pick a type able to represent every value of both columns;
        // fail if neither direction of widening works.
        VoltType commonType = myType;
        if (!myType.canExactlyRepresentAnyValueOf(otherType)) {
            if (otherType.canExactlyRepresentAnyValueOf(myType)) {
                commonType = otherType;
            } else {
                throw new PlanningErrorException("The " + schemaKindName +
                        " column type and the statement output type for column " +
                        idx + " are incompatible.");
            }
        }
        if (myType != commonType) {
            changedSomething = true;
            myColumn.setValueType(commonType);
        }
        assert(myType.isVariableLength() == otherType.isVariableLength());
        int commonSize;
        if (!myType.isVariableLength()) {
            commonSize = myType.getLengthInBytesForFixedTypesWithoutCheck();
        } else if (myType == VoltType.STRING) {
            // STRING sizes may be in bytes or characters; when the units
            // differ, normalize this column to bytes (4 bytes per character)
            // before taking the max, capped at the engine's length limit.
            boolean myInBytes = myColumn.getInBytes();
            boolean otherInBytes = otherColumn.getInBytes();
            if (myInBytes == otherInBytes) {
                commonSize = Math.max(myColumn.getValueSize(), otherColumn.getValueSize());
            } else {
                int mySizeInBytes = (myColumn.getInBytes() ? 1 : 4) * myColumn.getValueSize();
                int otherSizeInBytes = (otherColumn.getInBytes() ? 1 : 4) * otherColumn.getValueSize();
                if (!myColumn.getInBytes()) {
                    myColumn.setInBytes(true);
                    changedSomething = true;
                }
                commonSize = Math.max(mySizeInBytes, otherSizeInBytes);
                if (commonSize > VoltType.MAX_VALUE_LENGTH) {
                    commonSize = VoltType.MAX_VALUE_LENGTH;
                }
            }
        } else {
            // Other variable-length types: take the larger declared size.
            commonSize = Math.max(myColumn.getValueSize(), otherColumn.getValueSize());
        }
        if (commonSize != myColumn.getValueSize()) {
            myColumn.setValueSize(commonSize);
            changedSomething = true;
        }
    }
    return changedSomething;
}
Modifies this schema such that its columns can accommodate both values of its own types and that of otherSchema . Does not modify otherSchema .
30,949
/**
 * Sets every field of this iteration value in a single call.  Set is
 * all-or-nothing to avoid the potential for accidental omission of some
 * values.
 */
void set(final long valueIteratedTo, final long valueIteratedFrom,
         final long countAtValueIteratedTo, final long countInThisIterationStep,
         final long totalCountToThisValue, final long totalValueToThisValue,
         final double percentile, final double percentileLevelIteratedTo,
         double integerToDoubleValueConversionRatio) {
    this.valueIteratedTo = valueIteratedTo;
    this.valueIteratedFrom = valueIteratedFrom;
    this.countAtValueIteratedTo = countAtValueIteratedTo;
    this.countAddedInThisIterationStep = countInThisIterationStep;
    this.totalCountToThisValue = totalCountToThisValue;
    this.totalValueToThisValue = totalValueToThisValue;
    this.percentile = percentile;
    this.percentileLevelIteratedTo = percentileLevelIteratedTo;
    this.integerToDoubleValueConversionRatio = integerToDoubleValueConversionRatio;
}
Set is all - or - nothing to avoid the potential for accidental omission of some values ...
30,950
/**
 * Returns a singleton binding list containing this expression when the
 * index's original constant is known and equals the given expression;
 * returns null otherwise.
 */
public List<AbstractExpression> bindingToIndexedExpression(AbstractExpression expr) {
    // No binding unless the recorded original value matches the candidate.
    if (m_originalValue == null || !m_originalValue.equals(expr)) {
        return null;
    }
    List<AbstractExpression> binding = new ArrayList<AbstractExpression>();
    binding.add(this);
    return binding;
}
Returns a binding (a singleton list containing this parameter) when this parameter stands in for the indexed expression's constant, allowing plan reuse by a query in which that constant differs.
30,951
/**
 * Extracts the single parameter at the given index.
 * Reads from the serialized parameter buffer when one is present (a
 * duplicate is used so the shared buffer's position is untouched);
 * otherwise pulls it from the deserialized ParameterSet.
 *
 * @throws RuntimeException when the index is invalid or extraction fails
 */
Object getParameterAtIndex(int partitionIndex) {
    try {
        if (serializedParams != null) {
            return ParameterSet.getParameterAtIndex(partitionIndex, serializedParams.duplicate());
        } else {
            return params.get().getParam(partitionIndex);
        }
    } catch (Exception ex) {
        throw new RuntimeException("Invalid partitionIndex: " + partitionIndex, ex);
    }
}
Reads into a serialized parameter buffer to extract a single parameter
30,952
/**
 * Serializes this SPI in the original (pre-versioned) wire format,
 * currently used by DR.
 * Layout: type byte, varbinary procedure name, client handle, parameters.
 */
public void flattenToBufferForOriginalVersion(ByteBuffer buf) throws IOException {
    assert((params != null) || (serializedParams != null));
    int startPosition = buf.position();
    buf.put(ProcedureInvocationType.ORIGINAL.getValue());
    SerializationHelper.writeVarbinary(getProcNameBytes(), buf);
    buf.putLong(clientHandle);
    serializeParams(buf);
    // Cross-check the bytes written against the declared serialized size.
    int len = buf.position() - startPosition;
    assert(len == getSerializedSizeForOriginalVersion());
}
Serializes this SPI in the original serialization version . This is currently used by DR .
30,953
/**
 * Records an offset as submitted (consumed but not yet committed) and
 * tracks the maximal submitted offset.
 * Blocks up to m_gapFullWait ms when the gap between this offset and the
 * last safe offset would overflow the committedOffsets ring; commit()
 * notifies once space is available.
 */
public synchronized void submit(long offset) {
    // First submission seeds the ring and all the offset watermarks.
    if (submittedOffset == -1L && offset >= 0) {
        committedOffsets[idx(offset)] = safeOffset = submittedOffset = offset;
    }
    if (firstOffset == -1L) {
        firstOffset = offset;
    }
    // Ring would wrap: remember the offered offset and wait for commits
    // to advance safeOffset.
    if ((offset - safeOffset) >= committedOffsets.length) {
        offerOffset = offset;
        try {
            wait(m_gapFullWait);
        } catch (InterruptedException e) {
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS, Level.WARN, e,
                    "CommitTracker wait was interrupted for group " + consumerGroup +
                    " topic " + topic + " partition " + partition);
        }
    }
    if (offset > submittedOffset) {
        submittedOffset = offset;
    }
}
submit an offset while consuming a message and record the maximal submitted offset
30,954
/**
 * Records an offset as committed (durable in VoltDB) and advances
 * safeOffset, the highest offset contiguous with all earlier commits and
 * therefore safe to commit back to Kafka.
 *
 * @return the current safe offset
 */
public synchronized long commit(long offset) {
    if (offset <= submittedOffset && offset > safeOffset) {
        int ggap = (int) Math.min(committedOffsets.length, offset - safeOffset);
        if (ggap == committedOffsets.length) {
            // Ring is full: force the safe point forward past the gap and
            // log the jump.
            LOGGER.rateLimitedLog(LOG_SUPPRESSION_INTERVAL_SECONDS, Level.WARN, null,
                    "CommitTracker moving topic commit point from %d to %d for topic " +
                    topic + " partition " + partition + " group:" + consumerGroup,
                    safeOffset, (offset - committedOffsets.length + 1));
            safeOffset = offset - committedOffsets.length + 1;
            committedOffsets[idx(safeOffset)] = safeOffset;
        }
        committedOffsets[idx(offset)] = offset;
        // Walk the safe point forward over consecutive committed offsets.
        // NOTE(review): ggap is never decremented inside this loop, so it
        // acts only as an entry guard -- confirm that is the intent.
        while (ggap > 0 && committedOffsets[idx(safeOffset)] + 1 == committedOffsets[idx(safeOffset + 1)]) {
            ++safeOffset;
        }
        // Wake a producer blocked in submit() once ring space is available.
        if (offerOffset >= 0 && (offerOffset - safeOffset) < committedOffsets.length) {
            offerOffset = -1L;
            notify();
        }
    }
    if (offset == firstOffset) {
        firstOffsetCommitted = true;
    }
    return safeOffset;
}
Commits an offset that has been durably processed by VoltDB. It is recorded in committedOffsets and used to advance safeOffset, the highest offset that is safe to commit back to Kafka.
30,955
/**
 * Logs at the given level, rate-limited: the message is emitted only when
 * more than m_maxLogIntervalMillis has elapsed since the last emission.
 * The message string is formatted lazily, only when it will actually be
 * logged.  A double-checked time test keeps the common (suppressed) path
 * lock-free.
 */
public void log(long now, Level level, Throwable cause, String stemformat, Object... args) {
    if (now - m_lastLogTime > m_maxLogIntervalMillis) {
        synchronized (this) {
            // Re-check under the lock in case another thread just logged.
            if (now - m_lastLogTime > m_maxLogIntervalMillis) {
                String message = formatMessage(cause, stemformat, args);
                switch (level) {
                    case DEBUG: m_logger.debug(message); break;
                    case ERROR: m_logger.error(message); break;
                    case FATAL: m_logger.fatal(message); break;
                    case INFO:  m_logger.info(message);  break;
                    case TRACE: m_logger.trace(message); break;
                    case WARN:  m_logger.warn(message);  break;
                }
                m_lastLogTime = now;
            }
        }
    }
}
This variant delays the formatting of the string message until it is actually logged
30,956
/**
 * Notifies the coordinator that this site has received the first fragment
 * message, and records that the response has been sent.
 */
private void sendFirstFragResponse() {
    if (ELASTICLOG.isDebugEnabled()) {
        ELASTICLOG.debug("P" + m_partitionId
                + " sending first fragment response to coordinator "
                + CoreUtils.hsIdToString(m_coordinatorHsId));
    }
    final RejoinMessage ack =
            new RejoinMessage(m_mailbox.getHSId(), RejoinMessage.Type.FIRST_FRAGMENT_RECEIVED);
    m_mailbox.send(m_coordinatorHsId, ack);
    m_firstFragResponseSent = true;
}
Notify the coordinator that this site has received the first fragment message
30,957
/**
 * Drives the blocking snapshot data transfer for this joining partition:
 * polls the sink for restore work, applies it, and on EOF / snapshot
 * completion tears down the stream mailbox and finalizes the join.
 * Fix: the {@code assert (event != null)} used to run AFTER event was
 * already dereferenced; it now guards the first use.
 */
private void runForBlockingDataTransfer(SiteProcedureConnection siteConnection) {
    boolean sourcesReady = false;
    RestoreWork restoreWork = m_dataSink.poll(m_snapshotBufferAllocator);
    if (restoreWork != null) {
        restoreBlock(restoreWork, siteConnection);
        sourcesReady = true;
    }
    // Terminate the transfer when the sink is drained or the snapshot done.
    if (m_dataSink.isEOF() || m_snapshotCompletionMonitor.isDone()) {
        m_dataSink.close();
        if (m_streamSnapshotMb != null) {
            VoltDB.instance().getHostMessenger().removeMailbox(m_streamSnapshotMb.getHSId());
            m_streamSnapshotMb = null;
            ELASTICLOG.debug(m_whoami + " data transfer is finished");
        }
        if (m_snapshotCompletionMonitor.isDone()) {
            try {
                SnapshotCompletionEvent event = m_snapshotCompletionMonitor.get();
                assert(event != null);  // moved before the first dereference
                siteConnection.setDRProtocolVersion(event.drVersion);
                ELASTICLOG.debug("P" + m_partitionId + " noticed data transfer completion");
                m_completionAction.setSnapshotTxnId(event.multipartTxnId);
                setJoinComplete(siteConnection, event.exportSequenceNumbers,
                        event.drSequenceNumbers, event.drMixedClusterSizeConsumerState,
                        false, event.clusterCreateTime);
            } catch (InterruptedException e) {
                VoltDB.crashLocalVoltDB("Impossible interruption happend", true, e);
            } catch (ExecutionException e) {
                VoltDB.crashLocalVoltDB("Error waiting for snapshot to finish", true, e);
            }
        } else {
            // Snapshot still in flight: requeue ourselves to poll again.
            m_taskQueue.offer(this);
        }
    } else {
        returnToTaskQueue(sourcesReady);
    }
}
Blocking transfer all partitioned table data and notify the coordinator .
30,958
/**
 * Gets the service of the requested type from the bundle at the given jar
 * URI, delegating to m_bundles (which loads and starts the bundle if it
 * isn't yet loaded).
 */
public <T> T getService(URI bundleURI, Class<T> svcClazz) {
    return m_bundles.getService(bundleURI, svcClazz);
}
Gets the service from the given bundle jar URI. Loads and starts the bundle if it isn't yet loaded.
30,959
/**
 * Sets the file position for the row and propagates it to every index
 * node chained off the primary node.
 */
public void setPos(int pos) {
    position = pos;
    for (NodeAVL node = nPrimaryNode; node != null; node = node.nNext) {
        ((NodeAVLDisk) node).iData = position;
    }
}
Sets the file position for the row
30,960
/**
 * Allocates one disk node per index on the table, chained via nNext, with
 * the primary index node at the head.  Used in CachedDataRow.
 */
void setNewNodes() {
    final int indexCount = tTable.getIndexCount();
    nPrimaryNode = new NodeAVLDisk(this, 0);
    NodeAVL tail = nPrimaryNode;
    for (int i = 1; i < indexCount; i++) {
        tail.nNext = new NodeAVLDisk(this, i);
        tail = tail.nNext;
    }
}
used in CachedDataRow
30,961
/**
 * Used exclusively by the Cache to save the row to disk.  Writes only the
 * Node data when the table row data has not changed -- the common case,
 * since each row deleted or inserted changes the Nodes of several other
 * rows.
 * NOTE(review): any IOException is silently swallowed here -- confirm the
 * caller truly has no way to handle or report a failed write.
 */
public void write(RowOutputInterface out) {
    try {
        writeNodes(out);
        if (hasDataChanged) {
            out.writeData(rowData, tTable.colTypes);
            out.writeEnd();
            hasDataChanged = false;
        }
    } catch (IOException e) {}
}
Used exclusively by Cache to save the row to disk . New implementation in 1 . 7 . 2 writes out only the Node data if the table row data has not changed . This situation accounts for the majority of invocations as for each row deleted or inserted the Nodes for several other rows will change .
30,962
/**
 * Writes the Nodes immediately after the row's storage size, then clears
 * the nodes-changed flag.
 */
private void writeNodes(RowOutputInterface out) throws IOException {
    out.writeSize(storageSize);
    for (NodeAVL node = nPrimaryNode; node != null; node = node.nNext) {
        node.write(out);
    }
    hasNodesChanged = false;
}
Writes the Nodes immediately after the row size .
30,963
/**
 * Serializes this exception into the supplied byte buffer.
 * Layout: total length (excluding the 4 length bytes), exception type
 * ordinal, message length + bytes (length 0 when absent), then the
 * subclass payload via p_serializeToBuffer.
 * NOTE(review): m_message.getBytes() uses the platform default charset --
 * confirm serializer and deserializer agree on the encoding.
 */
public void serializeToBuffer(ByteBuffer b) {
    assert(getSerializedSize() <= b.remaining());
    b.putInt(getSerializedSize() - 4);
    b.put((byte) getExceptionType().ordinal());
    if (m_message != null) {
        final byte messageBytes[] = m_message.getBytes();
        b.putInt(messageBytes.length);
        b.put(messageBytes);
    } else {
        b.putInt(0);
    }
    p_serializeToBuffer(b);
}
Serialize this exception to the supplied byte buffer
30,964
/**
 * Generates the base column schema at run time.  Derived classes override
 * this to append their own columns, calling the superclass version first
 * so columns land in the list in the right order.
 * Column order here: timestamp, host id, host name.
 */
protected void populateColumnSchema(ArrayList<ColumnInfo> columns) {
    final ColumnInfo timestamp = new ColumnInfo("TIMESTAMP", VoltType.BIGINT);
    final ColumnInfo hostId =
            new ColumnInfo(VoltSystemProcedure.CNAME_HOST_ID, VoltSystemProcedure.CTYPE_ID);
    final ColumnInfo hostname = new ColumnInfo("HOSTNAME", VoltType.STRING);
    columns.add(timestamp);
    columns.add(hostId);
    columns.add(hostname);
}
Called from the constructor to generate the column schema at run time. Derived classes need to override this method in order to specify the columns they will be adding. The first line must always be a call to the superclass's version of populateColumnSchema in order to ensure the columns are added to the list in the right order.
30,965
/**
 * Gets the latest stat values as an array of row arrays suitable for
 * insertion into a VoltTable.  One row is produced per key yielded by
 * getStatsRowKeyIterator; each row is filled by updateStatsRow.
 */
public Object[][] getStatsRows(boolean interval, final Long now) {
    this.now = now;
    synchronized (this) {
        final ArrayList<Object[]> collected = new ArrayList<Object[]>();
        for (Iterator<Object> keys = getStatsRowKeyIterator(interval); keys.hasNext(); ) {
            final Object[] row = new Object[columns.size()];
            updateStatsRow(keys.next(), row);
            collected.add(row);
        }
        return collected.toArray(new Object[0][]);
    }
}
Get the latest stat values as an array of arrays of objects suitable for insertion into an VoltTable
30,966
/**
 * Fills in the base-class columns (timestamp, host id, host name) at the
 * start of the row.  Like populateColumnSchema, derived classes must
 * override this and call the superclass implementation first.
 */
protected void updateStatsRow(Object rowKey, Object rowValues[]) {
    rowValues[0] = now;
    rowValues[1] = m_hostId;
    rowValues[2] = m_hostname;
}
Update the parameter array with the latest values. This is similar to populateColumnSchema in that it must be overridden by derived classes, and the derived class implementation must call the superclass implementation first.
30,967
/**
 * Restores the data tree and session map from the most recent valid
 * snapshot on disk.  Tries up to 100 candidate snapshot files, skipping
 * any that fail to read or whose trailing Adler32 checksum does not match.
 *
 * @return the zxid of the restored snapshot, or -1 when none exist
 * @throws IOException when no valid snapshot can be found
 */
public long deserialize(DataTree dt, Map<Long, Long> sessions) throws IOException {
    List<File> snapList = findNValidSnapshots(100);
    if (snapList.size() == 0) {
        return -1L;
    }
    File snap = null;
    boolean foundValid = false;
    for (int i = 0; i < snapList.size(); i++) {
        snap = snapList.get(i);
        InputStream snapIS = null;
        CheckedInputStream crcIn = null;
        try {
            LOG.info("Reading snapshot " + snap);
            snapIS = new BufferedInputStream(new FileInputStream(snap));
            crcIn = new CheckedInputStream(snapIS, new Adler32());
            InputArchive ia = BinaryInputArchive.getArchive(crcIn);
            deserialize(dt, sessions, ia);
            // The checksum written by serialize() trails the payload;
            // a mismatch means on-disk corruption.
            long checkSum = crcIn.getChecksum().getValue();
            long val = ia.readLong("val");
            if (val != checkSum) {
                throw new IOException("CRC corruption in snapshot : " + snap);
            }
            foundValid = true;
            break;
        } catch (IOException e) {
            LOG.warn("problem reading snap file " + snap, e);
        } finally {
            if (snapIS != null)
                snapIS.close();
            if (crcIn != null)
                crcIn.close();
        }
    }
    if (!foundValid) {
        throw new IOException("Not able to find valid snapshots in " + snapDir);
    }
    // The snapshot's zxid is encoded in its file name.
    dt.lastProcessedZxid = Util.getZxidFromName(snap.getName(), "snapshot");
    return dt.lastProcessedZxid;
}
deserialize a data tree from the most recent snapshot
30,968
/**
 * Deserializes the datatree and sessions from an input archive, validating
 * the file header magic before touching the payload.
 *
 * @throws IOException when the header magic does not match SNAP_MAGIC
 */
public void deserialize(DataTree dt, Map<Long, Long> sessions, InputArchive ia) throws IOException {
    final FileHeader header = new FileHeader();
    header.deserialize(ia, "fileheader");
    if (header.getMagic() != SNAP_MAGIC) {
        throw new IOException("mismatching magic headers " + header.getMagic()
                + " != " + FileSnap.SNAP_MAGIC);
    }
    SerializeUtils.deserializeSnapshot(dt, ia, sessions);
}
deserialize the datatree from an inputarchive
30,969
/**
 * Finds the most recent valid snapshot in the database, or null when there
 * is none.
 */
public File findMostRecentSnapshot() throws IOException {
    final List<File> valid = findNValidSnapshots(1);
    return valid.isEmpty() ? null : valid.get(0);
}
find the most recent snapshot in the database .
30,970
/**
 * Finds the n most recent snapshot files, newest first.  No validity
 * checking is performed on the snapshots.
 */
public List<File> findNRecentSnapshots(int n) throws IOException {
    final List<File> sorted = Util.sortDataDir(snapDir.listFiles(), "snapshot", false);
    final List<File> recent = new ArrayList<File>();
    for (File f : sorted) {
        if (recent.size() == n) {
            break;
        }
        recent.add(f);
    }
    return recent;
}
Find the n most recent snapshots. This does not perform any checks of whether the snapshots are valid.
30,971
/**
 * Serializes the datatree and sessions into the output archive, preceded
 * by the file header.
 *
 * @throws IllegalStateException when the header is null (snapshot was
 *         never opened for writing)
 */
protected void serialize(DataTree dt, Map<Long, Long> sessions, OutputArchive oa, FileHeader header) throws IOException {
    if (header == null) {
        throw new IllegalStateException("Snapshot's not open for writing: uninitialized header");
    }
    header.serialize(oa, "fileheader");
    SerializeUtils.serializeSnapshot(dt, oa, sessions);
}
serialize the datatree and sessions
30,972
/**
 * Serializes the datatree and sessions into the given snapshot file,
 * appending the running Adler32 checksum (verified by deserialize) and the
 * terminating path marker.  A no-op once the snap log has been closed.
 * Fix: the output streams are now closed in a finally block, so they no
 * longer leak when serialization throws part-way.
 */
public synchronized void serialize(DataTree dt, Map<Long, Long> sessions, File snapShot) throws IOException {
    if (!close) {
        OutputStream sessOS = new BufferedOutputStream(new FileOutputStream(snapShot));
        CheckedOutputStream crcOut = new CheckedOutputStream(sessOS, new Adler32());
        try {
            OutputArchive oa = BinaryOutputArchive.getArchive(crcOut);
            FileHeader header = new FileHeader(SNAP_MAGIC, VERSION, dbId);
            serialize(dt, sessions, oa, header);
            long val = crcOut.getChecksum().getValue();
            oa.writeLong(val, "val");
            oa.writeString("/", "path");
            sessOS.flush();
        } finally {
            // Closing crcOut also closes the wrapped sessOS; the second
            // close is a harmless belt-and-braces no-op.
            crcOut.close();
            sessOS.close();
        }
    }
}
serialize the datatree and session into the file snapshot
30,973
/**
 * Synchronously collects one memory sample.  The sample always goes into
 * the short-interval history; the medium/large histories only receive it
 * when the corresponding flag is set.  Each history is capped at
 * historySize entries (oldest dropped first).
 *
 * @return the sample taken, or null when sampling failed
 */
public static Datum sampleSystemNow(final boolean medium, final boolean large) {
    final Datum sample = generateCurrentSample();
    if (sample == null) {
        return null;
    }
    historyS.addLast(sample);
    if (historyS.size() > historySize) {
        historyS.removeFirst();
    }
    if (medium) {
        historyM.addLast(sample);
        if (historyM.size() > historySize) {
            historyM.removeFirst();
        }
    }
    if (large) {
        historyL.addLast(sample);
        if (historyL.size() > historySize) {
            historyL.removeFirst();
        }
    }
    return sample;
}
Synchronously collect memory stats .
30,974
/**
 * Collects stats on a background thread when the collection mode is "ps"
 * (forking is too slow to do inline); otherwise samples synchronously.
 * At most one sampler thread is alive at a time: if the previous thread is
 * still running, this call is a no-op.
 */
public static synchronized void asyncSampleSystemNow(final boolean medium, final boolean large) {
    if (mode == GetRSSMode.PS) {
        if (thread != null) {
            if (thread.isAlive())
                return;
            else
                thread = null;
        }
        thread = new Thread(new Runnable() {
            public void run() {
                sampleSystemNow(medium, large);
            }
        });
        thread.start();
    } else {
        sampleSystemNow(medium, large);
    }
}
Fire off a thread to asynchronously collect stats .
30,975
/**
 * One-time setup: resolves this process's pid and total RAM, then probes
 * for the best way to read the RSS on an ongoing basis.
 * Probe order: native JNI call first, then procfs -- note procfs wins when
 * both probes succeed; the fork-"ps" fallback remains only when both fail.
 */
private static synchronized void initialize() {
    PlatformProperties pp = PlatformProperties.getPlatformProperties();
    // The JMX runtime name has the form "pid@hostname".
    String processName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    String pidString = processName.substring(0, processName.indexOf('@'));
    pid = Integer.valueOf(pidString);
    initialized = true;
    // Sanity-check that "ps" can see this process at all.
    PSScraper.PSData psdata = PSScraper.getPSData(pid);
    assert(psdata.rss > 0);
    memorysize = pp.ramInMegabytes;
    assert(memorysize > 0);
    long rss = -1;
    try {
        rss = ExecutionEngine.nativeGetRSS();
    } catch (Throwable e) {}
    if (rss > 0)
        mode = GetRSSMode.MACOSX_NATIVE;
    rss = getRSSFromProcFS();
    if (rss > 0)
        mode = GetRSSMode.PROCFS;
    if (mode == GetRSSMode.PS) {
        VoltLogger logger = new VoltLogger("HOST");
        logger.warn("System statistics will be collected in a sub-optimal " +
                "manner because either procfs couldn't be read from or " +
                "the native library couldn't be loaded.");
    }
}
Get the process id the total memory size and determine the best way to get the RSS on an ongoing basis .
30,976
/**
 * Gets the RSS from /proc/&lt;pid&gt;/stat, or -1 when procfs is absent or
 * cannot be parsed.
 * Field 24 (index 23) of the stat line is the RSS in pages; a 4 KB page
 * size is assumed here -- TODO confirm for non-4K-page platforms.
 */
private static long getRSSFromProcFS() {
    try {
        File statFile = new File(String.format("/proc/%d/stat", pid));
        FileInputStream fis = new FileInputStream(statFile);
        try {
            BufferedReader r = new BufferedReader(new InputStreamReader(fis));
            String stats = r.readLine();
            String[] parts = stats.split(" ");
            return Long.parseLong(parts[23]) * 4 * 1024;
        } finally {
            fis.close();
        }
    } catch (Exception e) {
        return -1;
    }
}
Get the RSS using the procfs . If procfs is not around this will return - 1 ;
30,977
/**
 * Polls the operating system via the configured collection mode and
 * generates a Datum.  The test hook, when installed, takes precedence over
 * real sampling.
 */
private static synchronized Datum generateCurrentSample() {
    if (testStatsProducer != null) {
        return testStatsProducer.getCurrentStatsData();
    }
    if (!initialized) {
        initialize();
    }
    long rss = -1;
    if (mode == GetRSSMode.MACOSX_NATIVE) {
        rss = ExecutionEngine.nativeGetRSS();
    } else if (mode == GetRSSMode.PROCFS) {
        rss = getRSSFromProcFS();
    } else if (mode == GetRSSMode.PS) {
        rss = PSScraper.getPSData(pid).rss;
    }
    return new Datum(rss);
}
Poll the operating system and generate a Datum
30,978
/**
 * Builds a Google Charts API URL graphing memory usage history: used Java
 * memory, RSS, and RSS plus unused Java heap headroom.
 * Fix: the Java byte-to-megabyte conversions divided by 1204*1024 instead
 * of 1024*1024 (typo), slightly inflating the Java memory lines.
 *
 * @param minutes   history window; selects the short/medium/long sample set
 * @param width     chart width in pixels
 * @param height    chart height in pixels
 * @param timeLabel label for the time axis
 */
public static synchronized String getGoogleChartURL(int minutes, int width, int height, String timeLabel) {
    ArrayDeque<Datum> history = historyS;
    if (minutes > 2) history = historyM;
    if (minutes > 30) history = historyL;

    HTMLChartHelper chart = new HTMLChartHelper();
    chart.width = width;
    chart.height = height;
    chart.timeLabel = timeLabel;

    HTMLChartHelper.DataSet Jds = new HTMLChartHelper.DataSet();
    chart.data.add(Jds);
    Jds.title = "UsedJava";
    Jds.belowcolor = "ff9999";

    HTMLChartHelper.DataSet Rds = new HTMLChartHelper.DataSet();
    chart.data.add(Rds);
    Rds.title = "RSS";
    Rds.belowcolor = "ff0000";

    HTMLChartHelper.DataSet RUds = new HTMLChartHelper.DataSet();
    chart.data.add(RUds);
    RUds.title = "RSS+UnusedJava";
    RUds.dashlength = 6;
    RUds.spacelength = 3;
    RUds.thickness = 2;
    RUds.belowcolor = "ffffff";

    // Drop samples older than the window; bucket timestamps into 30 slots.
    long cropts = System.currentTimeMillis();
    cropts -= (60 * 1000 * minutes);
    long modulo = (60 * 1000 * minutes) / 30;

    double maxmemdatum = 0;
    for (Datum d : history) {
        if (d.timestamp < cropts) continue;
        double javaused = d.javausedheapmem + d.javausedsysmem;
        double javaunused = SystemStatsCollector.javamaxheapmem - d.javausedheapmem;
        // Convert bytes to megabytes (was: 1204 * 1024 -- typo).
        javaused /= 1024 * 1024;
        javaunused /= 1024 * 1024;
        double rss = d.rss / 1024 / 1024;
        long ts = (d.timestamp / modulo) * modulo;
        if ((rss + javaunused) > maxmemdatum)
            maxmemdatum = rss + javaunused;
        RUds.append(ts, rss + javaunused);
        Rds.append(ts, rss);
        Jds.append(ts, javaused);
    }

    // Round the y-axis max up to the next power of two megabytes.
    chart.megsMax = 2;
    while (chart.megsMax < maxmemdatum)
        chart.megsMax *= 2;

    return chart.getURL(minutes);
}
Get a URL that uses the Google Charts API to show a chart of memory usage history .
30,979
/**
 * Manual performance test comparing the three RSS collection strategies:
 * forking "ps", reading procfs, and the native JNI call.  Each is run 1000
 * times; per-call latency and the count of successful reads are printed.
 */
public static void main(String[] args) {
    final int iterations = 1000;

    // Resolve our own pid from the JMX runtime name ("pid@host").
    String runtimeName = java.lang.management.ManagementFactory.getRuntimeMXBean().getName();
    pid = Integer.valueOf(runtimeName.substring(0, runtimeName.indexOf('@')));

    // Benchmark 1: forking "ps".
    long begin = System.currentTimeMillis();
    long ok = 0;
    for (int i = 0; i < iterations; i++) {
        if (PSScraper.getPSData(pid).rss > 0) {
            ok++;
        }
    }
    long elapsed = System.currentTimeMillis() - begin;
    System.out.printf("%.2f ms per \"ps\" call / %d / %d correct\n",
            elapsed / (double) iterations, ok, iterations);

    // Benchmark 2: reading /proc/<pid>/stat.
    begin = System.currentTimeMillis();
    ok = 0;
    for (int i = 0; i < iterations; i++) {
        if (getRSSFromProcFS() > 0) {
            ok++;
        }
    }
    elapsed = System.currentTimeMillis() - begin;
    System.out.printf("%.2f ms per procfs read / %d / %d correct\n",
            elapsed / (double) iterations, ok, iterations);

    // Benchmark 3: the native JNI call.
    begin = System.currentTimeMillis();
    ok = 0;
    for (int i = 0; i < iterations; i++) {
        if (ExecutionEngine.nativeGetRSS() > 0) {
            ok++;
        }
    }
    elapsed = System.currentTimeMillis() - begin;
    System.out.printf("%.2f ms per ee.nativeGetRSS call / %d / %d correct\n",
            elapsed / (double) iterations, ok, iterations);
}
Manual performance testing code for getting stats .
30,980
/**
 * Rolls back the session's row actions from the given start index onward,
 * merges the rolled-back changes into their rows, removes them from the
 * row action map, and trims the action list back to start.
 *
 * @param start     first index in session.rowActionList to roll back
 * @param timestamp rollback timestamp passed to each action
 */
void rollbackPartial(Session session, int start, long timestamp) {
    Object[] list = session.rowActionList.getArray();
    int limit = session.rowActionList.size();
    if (start == limit) {
        return;
    }
    for (int i = start; i < limit; i++) {
        RowAction action = (RowAction) list[i];
        if (action != null) {
            action.rollback(session, timestamp);
        } else {
            // NOTE(review): looks like leftover debug output on stdout --
            // consider routing through a logger or removing.
            System.out.println("null action in rollback " + start);
        }
    }
    mergeRolledBackTransaction(session.rowActionList.getArray(), start, limit);
    rowActionMapRemoveTransaction(session.rowActionList.getArray(), start, limit, false);
    session.rowActionList.setSize(start);
}
Roll back the session's row actions, starting from the given index in the list, using the given timestamp.
30,981
/**
 * Row visibility check: a row with no pending action is readable by every
 * session; otherwise the row's action decides for this session.
 */
public boolean canRead(Session session, Row row) {
    synchronized (row) {
        RowAction action = row.rowAction;
        return action == null || action.canRead(session);
    }
}
Functional unit — row accessibility: determines whether the given row can be read by the session.
30,982
/**
 * Attaches transaction info to a row just loaded from the cache; called
 * only for CACHED tables.  Rows that already carry an action are left
 * untouched.
 */
public void setTransactionInfo(CachedObject object) {
    Row row = (Row) object;
    if (row.rowAction == null) {
        row.rowAction = (RowAction) rowActionMap.get(row.position);
    }
}
add transaction info to a row just loaded from the cache . called only for CACHED tables
30,983
/**
 * Merges the rollback of each action in list[start..limit) back into its
 * row.  Null actions, no-ops and final deletes are skipped; rows for
 * cached tables are fetched from the persistent store when not held in
 * memory.
 */
void mergeRolledBackTransaction(Object[] list, int start, int limit) {
    for (int i = start; i < limit; i++) {
        RowAction rowact = (RowAction) list[i];
        if (rowact == null || rowact.type == RowActionBase.ACTION_NONE
                || rowact.type == RowActionBase.ACTION_DELETE_FINAL) {
            continue;
        }
        Row row = rowact.memoryRow;
        if (row == null) {
            PersistentStore store = rowact.session.sessionData.getRowStore(rowact.table);
            row = (Row) store.get(rowact.getPos(), false);
        }
        if (row == null) {
            // Row no longer present; nothing to merge.
            continue;
        }
        synchronized (row) {
            rowact.mergeRollback(row);
        }
    }
}
Merge the given list of rolled-back transaction actions back into their rows.
30,984
/**
 * Appends a committed transaction's action list to the end of the queue.
 * Both queues are appended under the same monitor so the action list and
 * its commit timestamp stay at matching positions.
 */
void addToCommittedQueue(Session session, Object[] list) {
    synchronized (committedTransactionTimestamps) {
        committedTransactions.addLast(list);
        committedTransactionTimestamps.addLast(session.actionTimestamp);
    }
}
add a list of actions to the end of queue
30,985
/**
 * Merges and discards all committed transactions whose commit timestamp
 * precedes the oldest live transaction -- no live transaction can still
 * need their actions, so they can be finalized.
 */
void mergeExpiredTransactions(Session session) {
    long timestamp = getFirstLiveTransactionTimestamp();
    while (true) {
        long commitTimestamp = 0;
        Object[] actions = null;
        // Pop the oldest committed transaction while holding the queue
        // monitor; the merge itself runs outside the monitor.
        synchronized (committedTransactionTimestamps) {
            if (committedTransactionTimestamps.isEmpty()) {
                break;
            }
            commitTimestamp = committedTransactionTimestamps.getFirst();
            if (commitTimestamp < timestamp) {
                committedTransactionTimestamps.removeFirst();
                actions = (Object[]) committedTransactions.removeFirst();
            } else {
                break;
            }
        }
        mergeTransaction(session, actions, 0, actions.length, commitTimestamp);
        rowActionMapRemoveTransaction(actions, 0, actions.length, true);
    }
}
expire all committed transactions that are no longer in scope
30,986
/**
 * Removes the session from the live-transaction queue when its transaction
 * ends, then expires any committed transactions no longer needed (those
 * ended before the first timestamp still in the live queue).
 * Fix: writeLock.lock() is now taken before the try block, so a failed
 * lock() can no longer trigger unlock() on an unheld lock.
 */
void endTransaction(Session session) {
    writeLock.lock();
    try {
        long timestamp = session.transactionTimestamp;
        synchronized (liveTransactionTimestamps) {
            session.isTransaction = false;
            int index = liveTransactionTimestamps.indexOf(timestamp);
            liveTransactionTimestamps.remove(index);
        }
        mergeExpiredTransactions(session);
    } finally {
        writeLock.unlock();
    }
}
remove session from queue when a transaction ends and expire any committed transactions that are no longer required . remove transactions ended before the first timestamp in liveTransactionsSession queue
30,987
/**
 * Returns all row actions from every session merged into one array sorted
 * by System Change Number (actionTimestamp), via an N-way merge over the
 * per-session action lists (each already in timestamp order).
 * Fix: writeLock.lock() is now taken before the try block, so a failed
 * lock() can no longer trigger unlock() on an unheld lock.
 */
RowAction[] getRowActionList() {
    writeLock.lock();
    try {
        Session[] sessions = database.sessionManager.getAllSessions();
        int[] tIndex = new int[sessions.length];   // per-session merge cursor
        RowAction[] rowActions;
        int rowActionCount = 0;
        {
            // Size the output to the total number of actions.
            int actioncount = 0;
            for (int i = 0; i < sessions.length; i++) {
                actioncount += sessions[i].getTransactionSize();
            }
            rowActions = new RowAction[actioncount];
        }
        while (true) {
            // Find the session whose next action has the smallest SCN.
            boolean found = false;
            long minChangeNo = Long.MAX_VALUE;
            int sessionIndex = 0;
            for (int i = 0; i < sessions.length; i++) {
                int tSize = sessions[i].getTransactionSize();
                if (tIndex[i] < tSize) {
                    RowAction current = (RowAction) sessions[i].rowActionList.get(tIndex[i]);
                    if (current.actionTimestamp < minChangeNo) {
                        minChangeNo = current.actionTimestamp;
                        sessionIndex = i;
                    }
                    found = true;
                }
            }
            if (!found) {
                break;
            }
            // Drain consecutive actions from that session while their SCNs
            // remain contiguous.
            HsqlArrayList currentList = sessions[sessionIndex].rowActionList;
            for (; tIndex[sessionIndex] < currentList.size(); ) {
                RowAction current = (RowAction) currentList.get(tIndex[sessionIndex]);
                if (current.actionTimestamp == minChangeNo + 1) {
                    minChangeNo++;
                }
                if (current.actionTimestamp == minChangeNo) {
                    rowActions[rowActionCount++] = current;
                    tIndex[sessionIndex]++;
                } else {
                    break;
                }
            }
        }
        return rowActions;
    } finally {
        writeLock.unlock();
    }
}
Return an array of all row actions sorted by System Change No .
30,988
/**
 * Returns a lookup of all row ids for cached-table rows involved in
 * transactions.  Used for auto-defrag, per the original note: there will
 * be no RowAction entries at the time of defrag.
 */
public DoubleIntIndex getTransactionIDList() {
    writeLock.lock();
    try {
        DoubleIntIndex lookup = new DoubleIntIndex(10, false);
        lookup.setKeysSearchTarget();
        Iterator it = this.rowActionMap.keySet().iterator();
        for (; it.hasNext(); ) {
            lookup.addUnique(it.nextInt(), 0);
        }
        return lookup;
    } finally {
        writeLock.unlock();
    }
}
Return a lookup of all row ids for cached tables in transactions . For auto - defrag as currently there will be no RowAction entries at the time of defrag .
30,989
/**
 * Converts the row ids of all RowActions for cached-table rows using the
 * given old-position to new-position lookup (rows move during defrag),
 * rebuilding rowActionMap under the new keys.
 */
public void convertTransactionIDs(DoubleIntIndex lookup) {
    writeLock.lock();
    try {
        // Snapshot the actions first -- the map is cleared before rekeying.
        RowAction[] list = new RowAction[rowActionMap.size()];
        Iterator it = this.rowActionMap.values().iterator();
        for (int i = 0; it.hasNext(); i++) {
            list[i] = (RowAction) it.next();
        }
        rowActionMap.clear();
        for (int i = 0; i < list.length; i++) {
            int pos = lookup.lookupFirstEqual(list[i].getPos());
            list[i].setPos(pos);
            rowActionMap.put(pos, list[i]);
        }
    } finally {
        writeLock.unlock();
    }
}
Convert row ID s for cached table rows in transactions
30,990
/**
 * Message factory: maps a wire-format message type byte to a fresh, empty
 * message instance ready for deserialization.  Overrides the voltcore
 * factory to add the message types voltcore doesn't know about.
 *
 * @return a new message of the matching type, or null for unknown types
 *         (the caller must handle null)
 */
protected VoltMessage instantiate_local(byte messageType) {
    VoltMessage message = null;
    switch (messageType) {
        case INITIATE_TASK_ID: message = new InitiateTaskMessage(); break;
        case INITIATE_RESPONSE_ID: message = new InitiateResponseMessage(); break;
        case FRAGMENT_TASK_ID: message = new FragmentTaskMessage(); break;
        case FRAGMENT_RESPONSE_ID: message = new FragmentResponseMessage(); break;
        case PARTICIPANT_NOTICE_ID: message = new MultiPartitionParticipantMessage(); break;
        case COALESCED_HEARTBEAT_ID: message = new CoalescedHeartbeatMessage(); break;
        case COMPLETE_TRANSACTION_ID: message = new CompleteTransactionMessage(); break;
        case COMPLETE_TRANSACTION_RESPONSE_ID: message = new CompleteTransactionResponseMessage(); break;
        case IV2_INITIATE_TASK_ID: message = new Iv2InitiateTaskMessage(); break;
        case IV2_REPAIR_LOG_REQUEST: message = new Iv2RepairLogRequestMessage(); break;
        case IV2_REPAIR_LOG_RESPONSE: message = new Iv2RepairLogResponseMessage(); break;
        case REJOIN_RESPONSE_ID: message = new RejoinMessage(); break;
        case REJOIN_DATA_ID: message = new RejoinDataMessage(); break;
        case REJOIN_DATA_ACK_ID: message = new RejoinDataAckMessage(); break;
        case FRAGMENT_TASK_LOG_ID: message = new FragmentTaskLogMessage(); break;
        case IV2_LOG_FAULT_ID: message = new Iv2LogFaultMessage(); break;
        case IV2_EOL_ID: message = new Iv2EndOfLogMessage(); break;
        case DUMP: message = new DumpMessage(); break;
        case MP_REPLAY_ID: message = new MpReplayMessage(); break;
        case MP_REPLAY_ACK_ID: message = new MpReplayAckMessage(); break;
        case SNAPSHOT_CHECK_REQUEST_ID: message = new SnapshotCheckRequestMessage(); break;
        case SNAPSHOT_CHECK_RESPONSE_ID: message = new SnapshotCheckResponseMessage(); break;
        case IV2_REPAIR_LOG_TRUNCATION: message = new RepairLogTruncationMessage(); break;
        case DR2_MULTIPART_TASK_ID: message = new Dr2MultipartTaskMessage(); break;
        case DR2_MULTIPART_RESPONSE_ID: message = new Dr2MultipartResponseMessage(); break;
        case DUMMY_TRANSACTION_TASK_ID: message = new DummyTransactionTaskMessage(); break;
        case DUMMY_TRANSACTION_RESPONSE_ID: message = new DummyTransactionResponseMessage(); break;
        case Migrate_Partition_Leader_MESSAGE_ID: message = new MigratePartitionLeaderMessage(); break;
        case DUMP_PLAN_ID: message = new DumpPlanThenExitMessage(); break;
        case FLUSH_RO_TXN_MESSAGE_ID: message = new MPBacklogFlushMessage(); break;
        default: message = null;
    }
    return message;
}
Overridden by subclasses to create message types unknown by voltcore
30,991
/**
 * Clears the data structures, making them eligible for garbage
 * collection. Lets the schema manager release its own structures first
 * (when one exists), then drops every manager/info reference held here.
 */
void clearStructures() {
    if (schemaManager != null) {
        schemaManager.clearStructures();
    }

    // Null out all references; the order of these assignments is irrelevant.
    schemaManager  = null;
    granteeManager = null;
    userManager    = null;
    nameManager    = null;
    sessionManager = null;
    dbInfo         = null;
}
Clears the data structures, making them eligible for garbage collection.
30,992
/**
 * Returns the schema and authorisation statements for the database as a
 * single-column ("COMMAND") result set.
 *
 * @param indexRoots when true, index-root SQL is included after the
 *                   schema objects
 * @return a Result holding one SQL statement per row
 */
public Result getScript(boolean indexRoots) {
    Result result = Result.newSingleColumnResult("COMMAND", Type.SQL_VARCHAR);

    // Emission order matters for replay: settings, grantees, schema
    // objects, (optionally) index roots, initial schemas, rights, properties.
    addRows(result, getSettingsSQL());
    addRows(result, getGranteeManager().getSQL());
    addRows(result, schemaManager.getSQLArray());

    if (indexRoots) {
        addRows(result, schemaManager.getIndexRootsSQL());
    }

    addRows(result, getUserManager().getInitialSchemaSQL());
    addRows(result, getGranteeManager().getRightstSQL());
    addRows(result, getPropertiesSQL());

    return result;
}
Returns the schema and authorisation statements for the database .
30,993
/**
 * Minimal parsing of a window specification: only the PARTITION BY and
 * ORDER BY lists are consumed. The full SQL window-frame grammar is not
 * parsed at all.
 *
 * @param tokenT  the windowed-function token type
 * @param aggExpr the aggregate being windowed, or null for a pure
 *                window function (must be an ExpressionAggregate if
 *                non-null)
 * @return the resulting ExpressionWindowed
 */
private Expression readWindowSpecification(int tokenT, Expression aggExpr) {
    readThis(Tokens.OPENBRACKET);

    List<Expression> partitionByList = new ArrayList<>();
    if (token.tokenType == Tokens.PARTITION) {
        read();
        readThis(Tokens.BY);
        // Comma-separated list of partitioning expressions.
        while (true) {
            partitionByList.add(XreadValueExpression());
            if (token.tokenType != Tokens.COMMA) {
                break;
            }
            read();
        }
    }

    SortAndSlice sortAndSlice = null;
    if (token.tokenType == Tokens.ORDER) {
        read();
        readThis(Tokens.BY);
        sortAndSlice = XreadOrderBy();
    }

    readThis(Tokens.CLOSEBRACKET);

    assert (aggExpr == null || aggExpr instanceof ExpressionAggregate);

    // Pull argument nodes and DISTINCT-ness from the aggregate, if any.
    Expression[] argNodes = Expression.emptyExpressionArray;
    boolean      distinct = false;
    if (aggExpr != null) {
        ExpressionAggregate winAgg = (ExpressionAggregate) aggExpr;
        argNodes = winAgg.nodes;
        distinct = winAgg.isDistinctAggregate;
    }

    return new ExpressionWindowed(tokenT, argNodes, distinct, sortAndSlice, partitionByList);
}
This is a minimal parsing of the Window Specification . We only use partition by and order by lists . There is a lot of complexity in the full SQL specification which we don t parse at all .
30,994
/**
 * Reads the right side of a STARTS WITH predicate.
 *
 * A literal prefix yields a dedicated ExpressionStartsWith node. A
 * parameter prefix is rewritten as a range test:
 *   left &gt;= ?  AND  left &lt;= ? || '\uffff'
 * Both sides being parameters is rejected (untyped comparison).
 *
 * @param left the left-hand operand already parsed by the caller
 * @return the logical expression implementing the predicate
 */
private ExpressionLogical XStartsWithPredicateRightPart(Expression left) {
    readThis(Tokens.WITH);

    if (token.tokenType != Tokens.QUESTION) {
        // Literal (string-valued) prefix.
        Expression prefix = XreadStringValueExpression();
        return new ExpressionStartsWith(left, prefix, this.isCheckOrTriggerCondition);
    }

    // Parameterized prefix: expand to a closed range on the left operand.
    Expression prefix = XreadRowValuePredicand();
    if (left.isParam() && prefix.isParam()) {
        throw Error.error(ErrorCode.X_42567);
    }

    Expression lowerBound = new ExpressionLogical(OpTypes.GREATER_EQUAL, left, prefix);
    Expression upperLimit = new ExpressionArithmetic(
            OpTypes.CONCAT, prefix, new ExpressionValue("\uffff", Type.SQL_CHAR));
    Expression upperBound = new ExpressionLogical(OpTypes.SMALLER_EQUAL, left, upperLimit);

    return new ExpressionLogical(OpTypes.AND, lowerBound, upperBound);
}
Scans the right-side string value and returns a STARTS WITH Expression, used for generating XML.
30,995
/**
 * Reads a row value constructor, trying the alternatives in order:
 * explicit row constructor, row/common value expression, and finally a
 * boolean value expression. The first non-null result wins.
 */
Expression XreadRowValueConstructor() {
    Expression result = XreadExplicitRowValueConstructorOrNull();

    if (result == null) {
        result = XreadRowOrCommonValueExpression();
    }
    if (result == null) {
        result = XreadBooleanValueExpression();
    }

    return result;
}
ISSUE - XreadCommonValueExpression and XreadBooleanValueExpression should merge
30,996
/**
 * Reads an explicit row value constructor: either a parenthesized form
 * (which may turn out to be a row subquery when TABLE/VALUES/SELECT
 * follows the brackets) or the ROW(...) form. Returns null when the
 * current token starts neither form.
 *
 * Must be called in conjunction with the parenthesized-expression
 * handling in the caller.
 *
 * @return the constructed Expression, or null if no row constructor
 *         starts at the current token
 */
Expression XreadExplicitRowValueConstructorOrNull() {
    Expression e;

    switch (token.tokenType) {
        case Tokens.OPENBRACKET : {
            read();
            int position = getPosition();
            // Lookahead only: skip nested open brackets to see whether a
            // query expression follows. The returned bracket count was
            // unused, so only the side effect (advancing the scanner) is
            // kept; every path below rewinds to 'position'.
            readOpenBrackets();
            switch (token.tokenType) {
                case Tokens.TABLE :
                case Tokens.VALUES :
                case Tokens.SELECT :
                    // A row subquery: re-parse from just after '('.
                    rewind(position);
                    SubQuery sq = XreadSubqueryBody(false, OpTypes.ROW_SUBQUERY);
                    readThis(Tokens.CLOSEBRACKET);
                    return new Expression(OpTypes.ROW_SUBQUERY, sq);
                default :
                    // A plain parenthesized row element list.
                    rewind(position);
                    e = XreadRowElementList(true);
                    readThis(Tokens.CLOSEBRACKET);
                    return e;
            }
        }
        case Tokens.ROW : {
            // Explicit ROW(...) constructor.
            read();
            readThis(Tokens.OPENBRACKET);
            e = XreadRowElementList(false);
            readThis(Tokens.CLOSEBRACKET);
            return e;
        }
    }

    return null;
}
must be called in conjunction with <parenthesized ..
30,997
/**
 * Reads part of a CASE .. WHEN expression: one WHEN clause plus,
 * recursively, any following WHEN clauses and the optional ELSE.
 *
 * @param l the comparison operand of a "simple" CASE (CASE l WHEN ...),
 *          or null for a "searched" CASE (CASE WHEN condition ...)
 * @return the CASEWHEN expression tree for this clause and its successors
 */
private Expression readCaseWhen(final Expression l) {
    readThis(Tokens.WHEN);

    Expression condition = null;

    if (l == null) {
        // Searched CASE: the WHEN clause is a full boolean condition.
        condition = XreadBooleanValueExpression();
    } else {
        // Simple CASE: one or more comma-separated alternatives for the
        // comparand l, OR'd together into a single condition.
        while (true) {
            Expression newCondition = XreadPredicateRightPart(l);

            if (l == newCondition) {
                // XreadPredicateRightPart returned l itself — presumably
                // no predicate operator followed, so treat the next row
                // value predicand as an equality alternative.
                newCondition = new ExpressionLogical(l, XreadRowValuePredicand());
            }

            if (condition == null) {
                condition = newCondition;
            } else {
                condition = new ExpressionLogical(OpTypes.OR, condition, newCondition);
            }

            if (token.tokenType == Tokens.COMMA) {
                read();
            } else {
                break;
            }
        }
    }

    readThis(Tokens.THEN);

    // Result value for this WHEN branch.
    Expression current  = XreadValueExpression();
    Expression elseExpr = null;

    if (token.tokenType == Tokens.WHEN) {
        // Subsequent WHEN clauses become this branch's alternative.
        elseExpr = readCaseWhen(l);
    } else if (token.tokenType == Tokens.ELSE) {
        read();

        elseExpr = XreadValueExpression();

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);    // accept the optional "END CASE" form
    } else {
        // No ELSE: the default alternative is SQL NULL.
        elseExpr = new ExpressionValue((Object) null, Type.SQL_ALL_TYPES);

        readThis(Tokens.END);
        readIfThis(Tokens.CASE);    // accept the optional "END CASE" form
    }

    Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE, current, elseExpr);
    Expression casewhen     = new ExpressionOp(OpTypes.CASEWHEN, condition, alternatives);

    return casewhen;
}
Reads part of a CASE .. WHEN expression
30,998
/**
 * Reads a CASEWHEN(condition, thenValue, elseValue) function call and
 * builds the equivalent CASEWHEN/ALTERNATIVE expression tree.
 */
private Expression readCaseWhenExpression() {
    read();    // consume the CASEWHEN token itself
    readThis(Tokens.OPENBRACKET);

    Expression condition = XreadBooleanValueExpression();
    readThis(Tokens.COMMA);

    Expression thenValue = XreadRowValueExpression();
    readThis(Tokens.COMMA);

    Expression elseValue = XreadValueExpression();

    Expression alternatives = new ExpressionOp(OpTypes.ALTERNATIVE, thenValue, elseValue);
    Expression result       = new ExpressionOp(OpTypes.CASEWHEN, condition, alternatives);

    readThis(Tokens.CLOSEBRACKET);

    return result;
}
reads a CASEWHEN expression
30,999
/**
 * Reads a CAST(expr AS type) or CONVERT(expr, type) expression and
 * wraps the operand in a cast ExpressionOp. A parameter operand has the
 * target type assigned to it directly.
 */
private Expression readCastExpression() {
    final boolean isConvert = (token.tokenType == Tokens.CONVERT);

    read();
    readThis(Tokens.OPENBRACKET);

    Expression operand = this.XreadValueExpressionOrNull();

    // CONVERT separates operand and type with a comma, CAST with AS.
    if (isConvert) {
        readThis(Tokens.COMMA);
    } else {
        readThis(Tokens.AS);
    }

    Type targetType = readTypeDefinition(true);

    // NOTE(review): XreadValueExpressionOrNull may return null, which
    // would NPE on the next line — confirm the grammar guarantees an
    // operand here, or add an explicit parse error.
    if (operand.isParam()) {
        operand.setDataType(session, targetType);
    }

    operand = new ExpressionOp(operand, targetType);

    readThis(Tokens.CLOSEBRACKET);

    return operand;
}
Reads a CAST or CONVERT expression