idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
31,400
public void getScanNodeList_recurse ( ArrayList < AbstractScanPlanNode > collected , HashSet < AbstractPlanNode > visited ) { if ( visited . contains ( this ) ) { assert ( false ) : "do not expect loops in plangraph." ; return ; } visited . add ( this ) ; for ( AbstractPlanNode n : m_children ) { n . getScanNodeList_recurse ( collected , visited ) ; } for ( AbstractPlanNode node : m_inlineNodes . values ( ) ) { node . getScanNodeList_recurse ( collected , visited ) ; } }
postorder adding scan nodes
31,401
public void getPlanNodeList_recurse ( ArrayList < AbstractPlanNode > collected , HashSet < AbstractPlanNode > visited ) { if ( visited . contains ( this ) ) { assert ( false ) : "do not expect loops in plangraph." ; return ; } visited . add ( this ) ; for ( AbstractPlanNode n : m_children ) { n . getPlanNodeList_recurse ( collected , visited ) ; } collected . add ( this ) ; }
postorder add nodes
31,402
private static Object nullValueForType ( final Class < ? > expectedClz ) { if ( expectedClz == long . class ) { return VoltType . NULL_BIGINT ; } else if ( expectedClz == int . class ) { return VoltType . NULL_INTEGER ; } else if ( expectedClz == short . class ) { return VoltType . NULL_SMALLINT ; } else if ( expectedClz == byte . class ) { return VoltType . NULL_TINYINT ; } else if ( expectedClz == double . class ) { return VoltType . NULL_FLOAT ; } return null ; }
Get the appropriate and compatible null value for a given parameter type .
31,403
private static Object convertStringToPrimitiveOrPrimitiveWrapper ( String value , final Class < ? > expectedClz ) throws VoltTypeException { value = value . trim ( ) ; if ( value . equals ( Constants . CSV_NULL ) ) return nullValueForType ( expectedClz ) ; String commaFreeValue = thousandSeparator . matcher ( value ) . replaceAll ( "" ) ; try { if ( isLongClass ( expectedClz ) ) { return Long . parseLong ( commaFreeValue ) ; } if ( isIntClass ( expectedClz ) ) { return Integer . parseInt ( commaFreeValue ) ; } if ( isShortClass ( expectedClz ) ) { return Short . parseShort ( commaFreeValue ) ; } if ( isByteClass ( expectedClz ) ) { return Byte . parseByte ( commaFreeValue ) ; } if ( isDoubleClass ( expectedClz ) ) { return Double . parseDouble ( commaFreeValue ) ; } } catch ( NumberFormatException nfe ) { if ( expectedClz != double . class ) { String hexDigits = SQLParser . getDigitsFromHexLiteral ( value ) ; if ( hexDigits != null ) { try { return SQLParser . hexDigitsToLong ( hexDigits ) ; } catch ( SQLParser . Exception spe ) { } } } } throw new VoltTypeException ( "Unable to convert string " + value + " to " + expectedClz . getName ( ) + " value for target parameter." ) ; }
Given a string covert it to a primitive type or boxed type of the primitive type or return null .
31,404
private static Object tryToMakeCompatibleArray ( final Class < ? > expectedComponentClz , final Class < ? > inputComponentClz , Object param ) throws VoltTypeException { int inputLength = Array . getLength ( param ) ; if ( inputComponentClz == expectedComponentClz ) { return param ; } else if ( inputLength == 0 ) { return Array . newInstance ( expectedComponentClz , 0 ) ; } else if ( isByteArrayClass ( inputComponentClz ) && ( expectedComponentClz == String . class ) ) { String [ ] values = new String [ inputLength ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { try { values [ i ] = new String ( ( byte [ ] ) Array . get ( param , i ) , "UTF-8" ) ; } catch ( UnsupportedEncodingException ex ) { throw new VoltTypeException ( "tryScalarMakeCompatible: Unsupported encoding:" + expectedComponentClz . getName ( ) + " to provided " + inputComponentClz . getName ( ) ) ; } } return values ; } else if ( ( inputComponentClz == String . class ) && ( expectedComponentClz == byte [ ] . class ) ) { byte [ ] [ ] values = new byte [ inputLength ] [ ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { values [ i ] = Encoder . hexDecode ( ( String ) Array . get ( param , i ) ) ; } return values ; } else if ( ( inputComponentClz == String . class ) && ( expectedComponentClz == Byte [ ] . class ) ) { Byte [ ] [ ] boxvalues = new Byte [ inputLength ] [ ] ; for ( int i = 0 ; i < inputLength ; i ++ ) { boxvalues [ i ] = ArrayUtils . toObject ( Encoder . hexDecode ( ( String ) Array . get ( param , i ) ) ) ; } return boxvalues ; } else { throw new VoltTypeException ( "tryScalarMakeCompatible: Unable to match parameter array:" + expectedComponentClz . getName ( ) + " to provided " + inputComponentClz . getName ( ) ) ; } }
Factored out code to handle array parameter types .
31,405
final static public VoltTable [ ] getResultsFromRawResults ( String procedureName , Object result ) throws InvocationTargetException { if ( result == null ) { return new VoltTable [ 0 ] ; } if ( result instanceof VoltTable [ ] ) { VoltTable [ ] retval = ( VoltTable [ ] ) result ; for ( VoltTable table : retval ) { if ( table == null ) { Exception e = new RuntimeException ( "VoltTable arrays with non-zero length cannot contain null values." ) ; throw new InvocationTargetException ( e ) ; } table . convertToHeapBuffer ( ) ; } return retval ; } if ( result instanceof VoltTable ) { VoltTable vt = ( VoltTable ) result ; vt . convertToHeapBuffer ( ) ; return new VoltTable [ ] { vt } ; } if ( result instanceof Long ) { VoltTable t = new VoltTable ( new VoltTable . ColumnInfo ( "" , VoltType . BIGINT ) ) ; t . addRow ( result ) ; return new VoltTable [ ] { t } ; } throw new RuntimeException ( String . format ( "Procedure %s unsupported procedure return type %s." , procedureName , result . getClass ( ) . getSimpleName ( ) ) ) ; }
Given the results of a procedure convert it into a sensible array of VoltTables .
31,406
public static void main ( String [ ] args ) { if ( args . length > 1 ) { printUsage ( ) ; } if ( args . length == 0 || ( args . length == 1 && args [ 0 ] . equals ( "--full" ) ) ) { System . out . println ( getFullVersion ( ) ) ; System . exit ( 0 ) ; } if ( args [ 0 ] . equals ( "--short" ) ) System . out . println ( getVersion ( ) ) ; else if ( args [ 0 ] . equals ( "--revision" ) ) System . out . println ( getVersionRevision ( ) ) ; else printUsage ( ) ; System . exit ( 0 ) ; }
Prints the current version revision and build date to the standard out .
31,407
void setFinal ( boolean isFinal ) throws IOException { if ( isFinal != m_isFinal ) { if ( PBDSegment . setFinal ( m_file , isFinal ) ) { if ( ! isFinal ) { m_fc . force ( true ) ; } } else if ( PBDSegment . isFinal ( m_file ) && ! isFinal ) { throw new IOException ( "Could not remove the final attribute from " + m_file . getName ( ) ) ; } m_isFinal = isFinal ; } }
Set or clear segment as final i . e . whether segment is complete and logically immutable .
31,408
public static String createParticipantNode ( ZooKeeper zk , String dir , String prefix , byte [ ] data ) throws KeeperException , InterruptedException { createRootIfNotExist ( zk , dir ) ; String node = zk . create ( ZKUtil . joinZKPath ( dir , prefix + "_" ) , data , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL_SEQUENTIAL ) ; zk . setData ( dir , new byte [ ] { INITIALIZED } , - 1 ) ; return node ; }
Provide a way for clients to create nodes which comply with the leader election format without participating in a leader election
31,409
synchronized public void shutdown ( ) throws InterruptedException , KeeperException { m_shutdown = true ; es . shutdown ( ) ; es . awaitTermination ( 365 , TimeUnit . DAYS ) ; }
Deletes the ephemeral node . Make sure that no future watches will fire .
31,410
private boolean watchNextLowerNode ( ) throws KeeperException , InterruptedException { List < String > children = zk . getChildren ( dir , false ) ; Collections . sort ( children ) ; ListIterator < String > iter = children . listIterator ( ) ; String me = null ; while ( iter . hasNext ( ) ) { me = ZKUtil . joinZKPath ( dir , iter . next ( ) ) ; if ( me . equals ( node ) ) { break ; } } assert ( me != null ) ; iter . previous ( ) ; while ( iter . hasPrevious ( ) ) { String previous = ZKUtil . joinZKPath ( dir , iter . previous ( ) ) ; if ( zk . exists ( previous , electionWatcher ) != null ) { return false ; } } return true ; }
Set a watch on the node that comes before the specified node in the directory .
31,411
public CharEscaperBuilder addEscape ( char c , String r ) { map . put ( c , checkNotNull ( r ) ) ; if ( c > max ) { max = c ; } return this ; }
Add a new mapping from an index to an object to the escaping .
31,412
public CharEscaperBuilder addEscapes ( char [ ] cs , String r ) { checkNotNull ( r ) ; for ( char c : cs ) { addEscape ( c , r ) ; } return this ; }
Add multiple mappings at once for a particular index .
31,413
private void deliverReadyTxns ( ) { VoltMessage m = m_replaySequencer . poll ( ) ; while ( m != null ) { deliver ( m ) ; m = m_replaySequencer . poll ( ) ; } m = m_replaySequencer . drain ( ) ; while ( m != null ) { if ( m instanceof Iv2InitiateTaskMessage ) { Iv2InitiateTaskMessage task = ( Iv2InitiateTaskMessage ) m ; final InitiateResponseMessage response = new InitiateResponseMessage ( task ) ; response . setResults ( new ClientResponseImpl ( ClientResponse . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , ClientResponseImpl . IGNORED_TRANSACTION ) ) ; m_mailbox . send ( response . getInitiatorHSId ( ) , response ) ; } m = m_replaySequencer . drain ( ) ; } }
Poll the replay sequencer and process the messages until it returns null
31,414
public boolean sequenceForReplay ( VoltMessage message ) { boolean canDeliver = false ; long sequenceWithUniqueId = Long . MIN_VALUE ; boolean commandLog = ( message instanceof TransactionInfoBaseMessage && ( ( ( TransactionInfoBaseMessage ) message ) . isForReplay ( ) ) ) ; boolean sentinel = message instanceof MultiPartitionParticipantMessage ; boolean replay = commandLog || sentinel ; boolean sequenceForReplay = m_isLeader && replay ; if ( replay ) { sequenceWithUniqueId = ( ( TransactionInfoBaseMessage ) message ) . getUniqueId ( ) ; } if ( sequenceForReplay ) { InitiateResponseMessage dupe = m_replaySequencer . dedupe ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; if ( dupe != null ) { m_mailbox . send ( dupe . getInitiatorHSId ( ) , dupe ) ; } else if ( ! m_replaySequencer . offer ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ) { canDeliver = true ; } else { deliverReadyTxns ( ) ; } if ( sentinel && ! commandLog ) { MultiPartitionParticipantMessage mppm = ( MultiPartitionParticipantMessage ) message ; final InitiateResponseMessage response = new InitiateResponseMessage ( mppm ) ; ClientResponseImpl clientResponse = new ClientResponseImpl ( ClientResponseImpl . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , ClientResponseImpl . IGNORED_TRANSACTION ) ; response . setResults ( clientResponse ) ; m_mailbox . send ( response . getInitiatorHSId ( ) , response ) ; } } else { if ( replay ) { m_replaySequencer . updateLastSeenUniqueId ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; m_replaySequencer . updateLastPolledUniqueId ( sequenceWithUniqueId , ( TransactionInfoBaseMessage ) message ) ; } canDeliver = true ; } return canDeliver ; }
Sequence the message for replay if it s for CL or DR .
31,415
private void doLocalInitiateOffer ( Iv2InitiateTaskMessage msg ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { final String threadName = Thread . currentThread ( ) . getName ( ) ; traceLog . add ( ( ) -> VoltTrace . meta ( "process_name" , "name" , CoreUtils . getHostnameOrAddress ( ) ) ) . add ( ( ) -> VoltTrace . meta ( "thread_name" , "name" , threadName ) ) . add ( ( ) -> VoltTrace . meta ( "thread_sort_index" , "sort_index" , Integer . toString ( 10000 ) ) ) . add ( ( ) -> VoltTrace . beginAsync ( "initsp" , MiscUtils . hsIdPairTxnIdToString ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , msg . getSpHandle ( ) , msg . getClientInterfaceHandle ( ) ) , "ciHandle" , msg . getClientInterfaceHandle ( ) , "txnId" , TxnEgo . txnIdToString ( msg . getTxnId ( ) ) , "partition" , m_partitionId , "read" , msg . isReadOnly ( ) , "name" , msg . getStoredProcedureName ( ) , "hsId" , CoreUtils . hsIdToString ( m_mailbox . getHSId ( ) ) ) ) ; } final String procedureName = msg . getStoredProcedureName ( ) ; final SpProcedureTask task = new SpProcedureTask ( m_mailbox , procedureName , m_pendingTasks , msg ) ; ListenableFuture < Object > durabilityBackpressureFuture = m_cl . log ( msg , msg . getSpHandle ( ) , null , m_durabilityListener , task ) ; if ( traceLog != null && durabilityBackpressureFuture != null ) { traceLog . add ( ( ) -> VoltTrace . beginAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , msg . getSpHandle ( ) ) , "txnId" , TxnEgo . txnIdToString ( msg . getTxnId ( ) ) , "partition" , Integer . toString ( m_partitionId ) ) ) ; } if ( m_cl . canOfferTask ( ) ) { m_pendingTasks . offer ( task . setDurabilityBackpressureFuture ( durabilityBackpressureFuture ) ) ; } }
Do the work necessary to turn the Iv2InitiateTaskMessage into a TransactionTask which can be queued to the TransactionTaskQueue . This is reused by both the normal message handling path and the repair path and assumes that the caller has dealt with or ensured that the necessary ID SpHandles and replication issues are resolved .
31,416
private void handleBorrowTaskMessage ( BorrowTaskMessage message ) { long newSpHandle = getMaxScheduledTxnSpHandle ( ) ; Iv2Trace . logFragmentTaskMessage ( message . getFragmentTaskMessage ( ) , m_mailbox . getHSId ( ) , newSpHandle , true ) ; final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginAsync ( "recvfragment" , MiscUtils . hsIdPairTxnIdToString ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , newSpHandle , 0 ) , "txnId" , TxnEgo . txnIdToString ( message . getTxnId ( ) ) , "partition" , m_partitionId , "hsId" , CoreUtils . hsIdToString ( m_mailbox . getHSId ( ) ) ) ) ; } TransactionState txn = m_outstandingTxns . get ( message . getTxnId ( ) ) ; if ( txn == null ) { txn = new BorrowTransactionState ( newSpHandle , message ) ; } if ( message . getFragmentTaskMessage ( ) . isSysProcTask ( ) ) { final SysprocBorrowedTask task = new SysprocBorrowedTask ( m_mailbox , ( ParticipantTransactionState ) txn , m_pendingTasks , message . getFragmentTaskMessage ( ) , message . getInputDepMap ( ) ) ; task . setResponseNotBufferable ( ) ; m_pendingTasks . offer ( task ) ; } else { final BorrowedTask task = new BorrowedTask ( m_mailbox , ( ParticipantTransactionState ) txn , m_pendingTasks , message . getFragmentTaskMessage ( ) , message . getInputDepMap ( ) ) ; task . setResponseNotBufferable ( ) ; m_pendingTasks . offer ( task ) ; } }
to perform replicated reads or aggregation fragment work .
31,417
void handleFragmentTaskMessage ( FragmentTaskMessage message ) { FragmentTaskMessage msg = message ; long newSpHandle ; if ( ! message . isForReplica ( ) && ( m_isLeader || message . isExecutedOnPreviousLeader ( ) ) ) { msg = new FragmentTaskMessage ( message . getInitiatorHSId ( ) , message . getCoordinatorHSId ( ) , message ) ; msg . setTimestamp ( message . getTimestamp ( ) ) ; msg . setExecutedOnPreviousLeader ( message . isExecutedOnPreviousLeader ( ) ) ; if ( ! message . isReadOnly ( ) ) { TxnEgo ego = advanceTxnEgo ( ) ; newSpHandle = ego . getTxnId ( ) ; if ( m_outstandingTxns . get ( msg . getTxnId ( ) ) == null ) { updateMaxScheduledTransactionSpHandle ( newSpHandle ) ; } } else { newSpHandle = getMaxScheduledTxnSpHandle ( ) ; } msg . setSpHandle ( newSpHandle ) ; msg . setLastSpUniqueId ( m_uniqueIdGenerator . getLastUniqueId ( ) ) ; logRepair ( msg ) ; if ( msg . getInitiateTask ( ) != null ) { msg . getInitiateTask ( ) . setSpHandle ( newSpHandle ) ; msg . setStateForDurability ( msg . getInitiateTask ( ) , msg . getInvolvedPartitions ( ) ) ; } if ( IS_KSAFE_CLUSTER && ( ! message . isReadOnly ( ) || msg . isSysProcTask ( ) ) ) { for ( long hsId : m_sendToHSIds ) { FragmentTaskMessage finalMsg = msg ; final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginAsync ( "replicatefragment" , MiscUtils . hsIdPairTxnIdToString ( m_mailbox . getHSId ( ) , hsId , finalMsg . getSpHandle ( ) , finalMsg . getTxnId ( ) ) , "txnId" , TxnEgo . txnIdToString ( finalMsg . getTxnId ( ) ) , "dest" , CoreUtils . hsIdToString ( hsId ) ) ) ; } } FragmentTaskMessage replmsg = new FragmentTaskMessage ( m_mailbox . getHSId ( ) , m_mailbox . getHSId ( ) , msg ) ; replmsg . setForReplica ( true ) ; replmsg . setTimestamp ( msg . getTimestamp ( ) ) ; if ( m_sendToHSIds . length > 0 ) { m_mailbox . send ( m_sendToHSIds , replmsg ) ; } DuplicateCounter counter ; if ( message . 
getFragmentTaskType ( ) != FragmentTaskMessage . SYS_PROC_PER_SITE ) { counter = new DuplicateCounter ( msg . getCoordinatorHSId ( ) , msg . getTxnId ( ) , m_replicaHSIds , replmsg ) ; } else { counter = new SysProcDuplicateCounter ( msg . getCoordinatorHSId ( ) , msg . getTxnId ( ) , m_replicaHSIds , replmsg ) ; } safeAddToDuplicateCounterMap ( new DuplicateCounterKey ( message . getTxnId ( ) , newSpHandle ) , counter ) ; } } else { logRepair ( msg ) ; newSpHandle = msg . getSpHandle ( ) ; setMaxSeenTxnId ( newSpHandle ) ; } Iv2Trace . logFragmentTaskMessage ( message , m_mailbox . getHSId ( ) , newSpHandle , false ) ; doLocalFragmentOffer ( msg ) ; }
doesn t matter it isn t going to be used for anything .
31,418
public void offerPendingMPTasks ( long txnId ) { Queue < TransactionTask > pendingTasks = m_mpsPendingDurability . get ( txnId ) ; if ( pendingTasks != null ) { for ( TransactionTask task : pendingTasks ) { if ( task instanceof SpProcedureTask ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . endAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , task . getSpHandle ( ) ) ) ) ; } } else if ( task instanceof FragmentTask ) { final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPI ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . endAsync ( "durability" , MiscUtils . hsIdTxnIdToString ( m_mailbox . getHSId ( ) , ( ( FragmentTask ) task ) . m_fragmentMsg . getSpHandle ( ) ) ) ) ; } } m_pendingTasks . offer ( task ) ; } m_mpsPendingDurability . remove ( txnId ) ; } }
Offer all fragment tasks and complete transaction tasks queued for durability for the given MP transaction and remove the entry from the pending map so that future ones won t be queued .
31,419
private void queueOrOfferMPTask ( TransactionTask task ) { Queue < TransactionTask > pendingTasks = m_mpsPendingDurability . get ( task . getTxnId ( ) ) ; if ( pendingTasks != null ) { pendingTasks . offer ( task ) ; } else { m_pendingTasks . offer ( task ) ; } }
Check if the MP task has to be queued because the first fragment is still being logged synchronously to the command log . If not offer it to the transaction task queue .
31,420
private void handleIv2LogFaultMessage ( Iv2LogFaultMessage message ) { SettableFuture < Boolean > written = writeIv2ViableReplayEntryInternal ( message . getSpHandle ( ) ) ; blockFaultLogWriteStatus ( written ) ; setMaxSeenTxnId ( message . getSpHandle ( ) ) ; m_uniqueIdGenerator . updateMostRecentlyGeneratedUniqueId ( message . getSpUniqueId ( ) ) ; m_cl . initializeLastDurableUniqueId ( m_durabilityListener , m_uniqueIdGenerator . getLastUniqueId ( ) ) ; }
Should only receive these messages at replicas when told by the leader
31,421
private void blockFaultLogWriteStatus ( SettableFuture < Boolean > written ) { boolean logWritten = false ; if ( written != null ) { try { logWritten = written . get ( ) ; } catch ( InterruptedException e ) { } catch ( ExecutionException e ) { if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Could not determine fault log state for partition: " + m_partitionId , e ) ; } } if ( ! logWritten ) { tmLog . warn ( "Attempted fault log not written for partition: " + m_partitionId ) ; } } }
Wait to get the status of a fault log write
31,422
SettableFuture < Boolean > writeIv2ViableReplayEntryInternal ( long spHandle ) { SettableFuture < Boolean > written = null ; if ( m_replayComplete ) { written = m_cl . logIv2Fault ( m_mailbox . getHSId ( ) , new HashSet < Long > ( m_replicaHSIds ) , m_partitionId , spHandle ) ; } return written ; }
Write the viable replay set to the command log with the provided SP Handle . Pass back the future that is set after the fault log is written to disk .
31,423
public void updateReplicasFromMigrationLeaderFailedHost ( int failedHostId ) { List < Long > replicas = new ArrayList < > ( ) ; for ( long hsid : m_replicaHSIds ) { if ( failedHostId != CoreUtils . getHostIdFromHSId ( hsid ) ) { replicas . add ( hsid ) ; } } ( ( InitiatorMailbox ) m_mailbox ) . updateReplicas ( replicas , null ) ; }
update the duplicated counters after the host failure .
31,424
public void forwardPendingTaskToRejoinNode ( long [ ] replicasAdded , long snapshotSpHandle ) { if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Forward pending tasks in backlog to rejoin node: " + Arrays . toString ( replicasAdded ) ) ; } if ( replicasAdded . length == 0 ) { return ; } boolean sentAny = false ; for ( Map . Entry < DuplicateCounterKey , DuplicateCounter > entry : m_duplicateCounters . entrySet ( ) ) { if ( snapshotSpHandle < entry . getKey ( ) . m_spHandle ) { if ( ! sentAny ) { sentAny = true ; if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Start forwarding pending tasks to rejoin node." ) ; } } if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( entry . getValue ( ) . getOpenMessage ( ) . getMessageInfo ( ) ) ; } m_mailbox . send ( replicasAdded , entry . getValue ( ) . getOpenMessage ( ) ) ; } } if ( sentAny && tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Finish forwarding pending tasks to rejoin node." ) ; } }
first fragment of stream snapshot and site runs the first fragment .
31,425
public void cleanupTransactionBacklogOnRepair ( ) { if ( m_isLeader && m_sendToHSIds . length > 0 ) { m_mailbox . send ( m_sendToHSIds , new MPBacklogFlushMessage ( ) ) ; } Iterator < Entry < Long , TransactionState > > iter = m_outstandingTxns . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Entry < Long , TransactionState > entry = iter . next ( ) ; TransactionState txnState = entry . getValue ( ) ; if ( TxnEgo . getPartitionId ( entry . getKey ( ) ) == MpInitiator . MP_INIT_PID ) { if ( txnState . isReadOnly ( ) ) { txnState . setDone ( ) ; m_duplicateCounters . entrySet ( ) . removeIf ( ( e ) -> e . getKey ( ) . m_txnId == entry . getKey ( ) ) ; iter . remove ( ) ; } } } m_pendingTasks . removeMPReadTransactions ( ) ; }
site leaders also forward the message to its replicas .
31,426
synchronized void reset ( ) { schemaMap . clear ( ) ; sqlLookup . clear ( ) ; csidMap . clear ( ) ; sessionUseMap . clear ( ) ; useMap . clear ( ) ; next_cs_id = 0 ; }
Clears all internal data structures removing any references to compiled statements .
31,427
synchronized void resetStatements ( ) { Iterator it = csidMap . values ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { Statement cs = ( Statement ) it . next ( ) ; cs . clearVariables ( ) ; } }
Used after a DDL change that could impact the compiled statements . Clears references to CompiledStatement objects while keeping the counts and references to the sql strings .
31,428
private long getStatementID ( HsqlName schema , String sql ) { LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schema . hashCode ( ) ) ; if ( sqlMap == null ) { return - 1 ; } return sqlMap . get ( sql , - 1 ) ; }
Retrieves the registered compiled statement identifier associated with the specified SQL String or a value less than zero if no such statement has been registered .
31,429
public synchronized Statement getStatement ( Session session , long csid ) { Statement cs = ( Statement ) csidMap . get ( csid ) ; if ( cs == null ) { return null ; } if ( ! cs . isValid ( ) ) { String sql = ( String ) sqlLookup . get ( csid ) ; try { Session sys = database . sessionManager . getSysSession ( session . currentSchema . name , session . getUser ( ) ) ; cs = sys . compileStatement ( sql ) ; cs . setID ( csid ) ; csidMap . put ( csid , cs ) ; } catch ( Throwable t ) { freeStatement ( csid , session . getId ( ) , true ) ; return null ; } } return cs ; }
Returns an existing CompiledStatement object with the given statement identifier . Returns null if the CompiledStatement object has been invalidated and cannot be recompiled
31,430
private void linkSession ( long csid , long sessionID ) { LongKeyIntValueHashMap scsMap ; scsMap = ( LongKeyIntValueHashMap ) sessionUseMap . get ( sessionID ) ; if ( scsMap == null ) { scsMap = new LongKeyIntValueHashMap ( ) ; sessionUseMap . put ( sessionID , scsMap ) ; } int count = scsMap . get ( csid , 0 ) ; scsMap . put ( csid , count + 1 ) ; if ( count == 0 ) { useMap . put ( csid , useMap . get ( csid , 0 ) + 1 ) ; } }
Links a session with a registered compiled statement . If this session has not already been linked with the given statement then the statement use count is incremented .
31,431
private long registerStatement ( long csid , Statement cs ) { if ( csid < 0 ) { csid = nextID ( ) ; int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; if ( sqlMap == null ) { sqlMap = new LongValueHashMap ( ) ; schemaMap . put ( schemaid , sqlMap ) ; } sqlMap . put ( cs . getSQL ( ) , csid ) ; sqlLookup . put ( csid , cs . getSQL ( ) ) ; } cs . setID ( csid ) ; csidMap . put ( csid , cs ) ; return csid ; }
Registers a compiled statement to be managed .
31,432
synchronized void removeSession ( long sessionID ) { LongKeyIntValueHashMap scsMap ; long csid ; Iterator i ; scsMap = ( LongKeyIntValueHashMap ) sessionUseMap . remove ( sessionID ) ; if ( scsMap == null ) { return ; } i = scsMap . keySet ( ) . iterator ( ) ; while ( i . hasNext ( ) ) { csid = i . nextLong ( ) ; int usecount = useMap . get ( csid , 1 ) - 1 ; if ( usecount == 0 ) { Statement cs = ( Statement ) csidMap . remove ( csid ) ; if ( cs != null ) { int schemaid = cs . getSchemaName ( ) . hashCode ( ) ; LongValueHashMap sqlMap = ( LongValueHashMap ) schemaMap . get ( schemaid ) ; String sql = ( String ) sqlLookup . remove ( csid ) ; sqlMap . remove ( sql ) ; } useMap . remove ( csid ) ; } else { useMap . put ( csid , usecount ) ; } } }
Releases the link betwen the session and all compiled statement objects it is linked to . If any such statement is not linked with any other session it is removed from management .
31,433
synchronized Statement compile ( Session session , Result cmd ) throws Throwable { String sql = cmd . getMainString ( ) ; long csid = getStatementID ( session . currentSchema , sql ) ; Statement cs = ( Statement ) csidMap . get ( csid ) ; if ( cs == null || ! cs . isValid ( ) || ! session . isAdmin ( ) ) { Session sys = database . sessionManager . getSysSession ( session . currentSchema . name , session . getUser ( ) ) ; cs = sys . compileStatement ( sql ) ; csid = registerStatement ( csid , cs ) ; } linkSession ( csid , session . getId ( ) ) ; return cs ; }
Compiles an SQL statement and returns a CompiledStatement Object
31,434
private void startupInstance ( ) throws IOException { assert ( m_blockPathMap . isEmpty ( ) ) ; try { clearSwapDir ( ) ; } catch ( Exception e ) { throw new IOException ( "Unable to clear large query swap directory: " + e . getMessage ( ) ) ; } }
On startup clear out the large query swap directory .
31,435
void storeBlock ( BlockId blockId , ByteBuffer block ) throws IOException { synchronized ( m_accessLock ) { if ( m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to store block that is already stored: " + blockId . toString ( ) ) ; } int origPosition = block . position ( ) ; block . position ( 0 ) ; Path blockPath = makeBlockPath ( blockId ) ; try ( SeekableByteChannel channel = Files . newByteChannel ( blockPath , OPEN_OPTIONS , PERMISSIONS ) ) { channel . write ( block ) ; } finally { block . position ( origPosition ) ; } m_blockPathMap . put ( blockId , blockPath ) ; } }
Store the given block with the given ID to disk .
31,436
void loadBlock ( BlockId blockId , ByteBuffer block ) throws IOException { synchronized ( m_accessLock ) { if ( ! m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to load block that is not stored: " + blockId ) ; } int origPosition = block . position ( ) ; block . position ( 0 ) ; Path blockPath = m_blockPathMap . get ( blockId ) ; try ( SeekableByteChannel channel = Files . newByteChannel ( blockPath ) ) { channel . read ( block ) ; } finally { block . position ( origPosition ) ; } } }
Read the block with the given ID into the given byte buffer .
31,437
void releaseBlock ( BlockId blockId ) throws IOException { synchronized ( m_accessLock ) { if ( ! m_blockPathMap . containsKey ( blockId ) ) { throw new IllegalArgumentException ( "Request to release block that is not stored: " + blockId ) ; } Path blockPath = m_blockPathMap . get ( blockId ) ; Files . delete ( blockPath ) ; m_blockPathMap . remove ( blockId ) ; } }
The block with the given site id and block counter is no longer needed so delete it from disk .
31,438
private void releaseAllBlocks ( ) throws IOException { synchronized ( m_accessLock ) { Set < Map . Entry < BlockId , Path > > entries = m_blockPathMap . entrySet ( ) ; while ( ! entries . isEmpty ( ) ) { Map . Entry < BlockId , Path > entry = entries . iterator ( ) . next ( ) ; Files . delete ( entry . getValue ( ) ) ; m_blockPathMap . remove ( entry . getKey ( ) ) ; entries = m_blockPathMap . entrySet ( ) ; } } }
Release all the blocks that are on disk and delete them from the map that tracks them .
31,439
Path makeBlockPath ( BlockId id ) { String filename = id . fileNameString ( ) ; return m_largeQuerySwapPath . resolve ( filename ) ; }
Given package visibility for unit testing purposes .
31,440
public static List < Shard > discoverShards ( String regionName , String streamName , String accessKey , String secretKey , String appName ) { try { Region region = RegionUtils . getRegion ( regionName ) ; if ( region != null ) { final AWSCredentials credentials = new BasicAWSCredentials ( accessKey , secretKey ) ; AmazonKinesis kinesisClient = new AmazonKinesisClient ( credentials , getClientConfigWithUserAgent ( appName ) ) ; kinesisClient . setRegion ( region ) ; DescribeStreamResult result = kinesisClient . describeStream ( streamName ) ; if ( ! "ACTIVE" . equals ( result . getStreamDescription ( ) . getStreamStatus ( ) ) ) { throw new IllegalArgumentException ( "Kinesis stream " + streamName + " is not active." ) ; } return result . getStreamDescription ( ) . getShards ( ) ; } } catch ( ResourceNotFoundException e ) { LOGGER . warn ( "Kinesis stream " + streamName + " does not exist." , e ) ; } catch ( Exception e ) { LOGGER . warn ( "Error found while describing the kinesis stream " + streamName , e ) ; } return null ; }
connect to kinesis stream to discover the shards on the stream
31,441
public static String getProperty ( Properties props , String propertyName , String defaultValue ) { String value = props . getProperty ( propertyName , defaultValue ) . trim ( ) ; if ( value . isEmpty ( ) ) { throw new IllegalArgumentException ( "Property " + propertyName + " is missing in Kinesis importer configuration." ) ; } return value ; }
get property value . If no value is available throw IllegalArgumentException
31,442
/**
 * Returns the named property parsed as a positive long, or defaultValue when
 * the property is absent or blank.
 *
 * @throws IllegalArgumentException if the value is non-numeric or not positive
 */
public static long getPropertyAsLong(Properties props, String propertyName, long defaultValue) {
    String raw = props.getProperty(propertyName, "").trim();
    if (raw.isEmpty()) {
        return defaultValue;
    }
    final long parsed;
    try {
        parsed = Long.parseLong(raw);
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException("Property " + propertyName + " must be a number in Kinesis importer configuration.");
    }
    if (parsed <= 0) {
        throw new IllegalArgumentException("Value of " + propertyName + " should be positive, but current value is " + parsed);
    }
    return parsed;
}
get property value as long .
31,443
/**
 * Adds a child to this node's children set, lazily creating the set on first
 * use.  Synchronized because multiple threads may insert concurrently.
 *
 * @return true when the child was not already present
 */
public synchronized boolean addChild(String childName) {
    if (children == null) {
        // Small initial capacity: most nodes have few children.
        children = new HashSet<String>(8);
    }
    return children.add(childName);
}
Method that inserts a child into the children set
31,444
/**
 * Lazily computes and caches the Host header value to advertise for this
 * server.  Preference order: an explicitly configured public interface, then
 * the first interface/port published in the local metadata JSON, then the
 * local address with the default HTTP port.  Useful when the server sits
 * behind NAT and the bind address is not externally reachable.
 */
protected String getHostHeader() {
    if (m_hostHeader != null) {
        return m_hostHeader; // already computed
    }
    if (!httpAdminListener.m_publicIntf.isEmpty()) {
        // An operator-configured public interface wins outright.
        m_hostHeader = httpAdminListener.m_publicIntf;
        return m_hostHeader;
    }
    InetAddress addr = null;
    int httpPort = VoltDB.DEFAULT_HTTP_PORT;
    try {
        // Local metadata is a JSON blob holding the advertised interfaces/ports.
        String localMetadata = VoltDB.instance().getLocalMetadata();
        JSONObject jsObj = new JSONObject(localMetadata);
        JSONArray interfaces = jsObj.getJSONArray("interfaces");
        String iface = interfaces.getString(0);
        addr = InetAddress.getByName(iface);
        httpPort = jsObj.getInt("httpPort");
    } catch (Exception e) {
        m_log.warn("Failed to get HTTP interface information.", e);
    }
    if (addr == null) {
        // Fall back to whatever local address we can determine.
        addr = org.voltcore.utils.CoreUtils.getLocalAddress();
    }
    m_hostHeader = addr.getHostAddress() + ":" + httpPort;
    return m_hostHeader;
}
like behind a NATed network .
31,445
/**
 * Renders the live catalog report page into the HTTP response; failures to
 * write the response are logged, not propagated.
 */
void handleReportPage(HttpServletRequest request, HttpServletResponse response) {
    try {
        final String reportHtml = ReportMaker.liveReport();
        response.setContentType(HTML_CONTENT_TYPE);
        response.setStatus(HttpServletResponse.SC_OK);
        response.getWriter().print(reportHtml);
    } catch (IOException ex) {
        m_log.warn("Failed to get catalog report.", ex);
    }
}
Draw the catalog report page mostly by pulling it from the JAR .
31,446
/**
 * Returns the named property as an int, restricted to the given list of
 * allowed values.  Falls back to defaultValue when the property is absent,
 * unparsable, or not one of the allowed values.  defaultValue itself must be
 * a member of the values list.
 */
public int getIntegerProperty(String key, int defaultValue, int[] values) {
    int result = defaultValue;
    String raw = getProperty(key);
    if (raw != null) {
        try {
            result = Integer.parseInt(raw);
        } catch (NumberFormatException e) {
            // Unparsable input silently falls back to the default.
        }
    }
    // Reject anything outside the allowed set.
    return ArrayUtil.find(values, result) == -1 ? defaultValue : result;
}
Choice limited to values list defaultValue must be in the values list .
31,447
/**
 * Saves the properties to "&lt;fileName&gt;.properties".
 *
 * @throws java.io.FileNotFoundException when no file name has been set
 */
public void save() throws Exception {
    if (fileName == null || fileName.length() == 0) {
        throw new java.io.FileNotFoundException(
                Error.getMessage(ErrorCode.M_HsqlProperties_load));
    }
    save(fileName + ".properties");
}
Saves the properties .
31,448
/**
 * Saves the properties to the named file, creating parent directories as
 * needed and syncing the file descriptor so the data reaches stable storage.
 *
 * @param fileString target file path
 * @throws Exception on any I/O failure
 */
public void save(String fileString) throws Exception {
    fa.createParentDirs(fileString);
    OutputStream fos = fa.openOutputStreamElement(fileString);
    try {
        FileAccess.FileSync outDescriptor = fa.getFileSync(fos);
        JavaSystem.saveProperties(
                stringProps,
                HsqlDatabaseProperties.PRODUCT_NAME + " "
                        + HsqlDatabaseProperties.THIS_FULL_VERSION,
                fos);
        fos.flush();
        outDescriptor.sync();
    } finally {
        // Close even when saving fails so the stream never leaks
        // (the original leaked fos on any exception above).
        fos.close();
    }
}
Saves the properties using JDK2 method if present otherwise JDK1 .
31,449
/**
 * Appends an error code / key pair to the parallel error arrays.  The
 * accumulated list is populated during construction and inspected by callers
 * afterwards to act on any errors found.
 */
private void addError(int code, String key) {
    final int newSize = errorCodes.length + 1;
    errorCodes = (int[]) ArrayUtil.resizeArray(errorCodes, newSize);
    errorKeys = (String[]) ArrayUtil.resizeArray(errorKeys, newSize);
    errorCodes[newSize - 1] = code;
    errorKeys[newSize - 1] = key;
}
Adds the error code and the key to the list of errors . This list is populated during construction or addition of elements and is used outside this class to act upon the errors .
31,450
/**
 * Verifies that the supplied value matches this user's password, throwing an
 * SQLSTATE 28000 (invalid authorization) error on mismatch.
 * NOTE(review): String.equals is not a constant-time comparison — confirm
 * whether timing side-channels matter for this code path.
 */
public void checkPassword(String value) {
    final boolean matches = value.equals(password);
    if (!matches) {
        throw Error.error(ErrorCode.X_28000);
    }
}
Checks if this object's password attribute equals the specified argument, else throws.
31,451
/**
 * Builds the DDL statement that recreates this user, e.g.
 * CREATE USER "name" PASSWORD "pwd".
 */
public String getCreateUserSQL() {
    StringBuffer sb = new StringBuffer(64);
    sb.append(Tokens.T_CREATE).append(' ')
      .append(Tokens.T_USER).append(' ')
      .append(getStatementName()).append(' ')
      .append(Tokens.T_PASSWORD).append(' ')
      .append(StringConverter.toQuotedString(password, '"', true));
    return sb.toString();
}
Returns the DDL string sequence that creates this user .
31,452
/**
 * Builds the statement recorded in the redo log to switch the session to
 * this user: SET SESSION AUTHORIZATION 'name'.
 */
public String getConnectUserSQL() {
    StringBuffer sb = new StringBuffer();
    sb.append(Tokens.T_SET).append(' ')
      .append(Tokens.T_SESSION).append(' ')
      .append(Tokens.T_AUTHORIZATION).append(' ')
      .append(StringConverter.toQuotedString(getNameString(), '\'', true));
    return sb.toString();
}
Retrieves the redo log character sequence for connecting this user
31,453
/**
 * Parses a serialized public-suffix trie into an immutable map from suffix
 * to its {@link PublicSuffixType}.  Repeatedly consumes top-level subtrees
 * until the encoded input is exhausted.
 */
static ImmutableMap<String, PublicSuffixType> parseTrie(CharSequence encoded) {
    ImmutableMap.Builder<String, PublicSuffixType> builder = ImmutableMap.builder();
    final int totalLen = encoded.length();
    int pos = 0;
    while (pos < totalLen) {
        // Each call consumes one subtree and reports how far it advanced.
        pos += doParseTrieToBuilder(Lists.<CharSequence>newLinkedList(),
                encoded.subSequence(pos, totalLen), builder);
    }
    return builder.build();
}
Parses a serialized trie representation of a map of reversed public suffixes into an immutable map of public suffixes .
31,454
/**
 * Parses one trie node from {@code encoded}, adding any complete domain it
 * terminates to {@code builder}, and recursing into child nodes.
 *
 * @param stack the labels on the path from the root to this node, each
 *        already reversed; this node's label is pushed while processing
 *        children and popped before returning
 * @param encoded the remaining serialized trie
 * @param builder accumulates suffix -> type entries
 * @return the number of characters of {@code encoded} consumed
 */
private static int doParseTrieToBuilder(List<CharSequence> stack, CharSequence encoded,
        ImmutableMap.Builder<String, PublicSuffixType> builder) {
    int encodedLen = encoded.length();
    int idx = 0;
    char c = '\0';
    // Scan this node's label up to the first control character:
    // '&' starts a child list, '?'/',' end a node, '!'/':'/',' carry a type.
    for (; idx < encodedLen; idx++) {
        c = encoded.charAt(idx);
        if (c == '&' || c == '?' || c == '!' || c == ':' || c == ',') {
            break;
        }
    }
    // Labels are stored reversed; un-reverse while pushing onto the path.
    stack.add(0, reverse(encoded.subSequence(0, idx)));
    if (c == '!' || c == '?' || c == ':' || c == ',') {
        // These control characters terminate a complete domain entry.
        String domain = PREFIX_JOINER.join(stack);
        if (domain.length() > 0) {
            builder.put(domain, PublicSuffixType.fromCode(c));
        }
    }
    idx++; // consume the control character itself
    if (c != '?' && c != ',') {
        // Node has children: parse each until a '?' or ',' closes the list.
        while (idx < encodedLen) {
            idx += doParseTrieToBuilder(stack, encoded.subSequence(idx, encodedLen), builder);
            if (encoded.charAt(idx) == '?' || encoded.charAt(idx) == ',') {
                idx++; // consume the list terminator
                break;
            }
        }
    }
    stack.remove(0); // pop this node's label
    return idx;
}
Parses a trie node and returns the number of characters consumed .
31,455
/**
 * Creates and installs the singleton ExportManager, optionally clearing
 * overflow data, and registers its statistics source.
 * FIXME: this synchronizes on the ExportManager class but everyone else
 * synchronizes on the instance.
 */
public static synchronized void initialize(int myHostId, CatalogContext catalogContext,
        boolean isRejoin, boolean forceCreate, HostMessenger messenger,
        List<Pair<Integer, Integer>> partitions) throws ExportManager.SetupException {
    ExportManager manager = new ExportManager(myHostId, catalogContext, messenger);
    m_self = manager;
    if (forceCreate) {
        // A forced create wipes any export overflow left from a prior run.
        manager.clearOverflowData();
    }
    manager.initialize(catalogContext, partitions, isRejoin);
    RealVoltDB db = (RealVoltDB) VoltDB.instance();
    db.getStatsAgent().registerStatsSource(StatsSelector.EXPORT, myHostId, manager.getExportStats());
}
FIXME - this synchronizes on the ExportManager class but everyone else synchronizes on the instance .
31,456
/**
 * Creates the initial export processor when export is enabled: builds the
 * export generation over the overflow directory, wires the processor to it,
 * and signals readiness for data.  A no-op when no tables are exported.
 * NOTE(review): the isRejoin parameter is not used in this method — confirm
 * whether that is intentional.
 */
private void initialize(CatalogContext catalogContext, List<Pair<Integer, Integer>> localPartitionsToSites, boolean isRejoin) {
    try {
        CatalogMap<Connector> connectors = CatalogUtil.getConnectors(catalogContext);
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("initialize for " + connectors.size() + " connectors.");
            CatalogUtil.dumpConnectors(exportLog, connectors);
        }
        // Nothing to do when no table is exported.
        if (!CatalogUtil.hasExportedTables(connectors)) {
            return;
        }
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Creating processor " + m_loaderClass);
        }
        ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet(m_processorConfig);
        m_processor.set(newProcessor);
        File exportOverflowDirectory = new File(VoltDB.instance().getExportOverflowPath());
        ExportGeneration generation = new ExportGeneration(exportOverflowDirectory, m_messenger);
        generation.initialize(m_hostId, catalogContext, connectors, newProcessor, localPartitionsToSites, exportOverflowDirectory);
        m_generation.set(generation);
        newProcessor.setExportGeneration(generation);
        newProcessor.readyForData();
    } catch (final ClassNotFoundException e) {
        // The configured loader class is unavailable; fatal for export.
        exportLog.l7dlog(Level.ERROR, LogKeys.export_ExportManager_NoLoaderExtensions.name(), e);
        throw new RuntimeException(e);
    } catch (final Exception e) {
        exportLog.error("Initialize failed with:", e);
        throw new RuntimeException(e);
    }
}
Creates the initial export processor if export is enabled
31,457
/**
 * Shuts down the old export processor, installs a newly-configured one on
 * the existing generation, and re-accepts mastership for the partitions this
 * host led.  Crashes the node if the new processor cannot be created.
 */
private void swapWithNewProcessor(final CatalogContext catalogContext, ExportGeneration generation,
        CatalogMap<Connector> connectors, List<Pair<Integer, Integer>> partitions,
        Map<String, Pair<Properties, Set<String>>> config) {
    ExportDataProcessor oldProcessor = m_processor.get();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Shutdown guestprocessor");
    }
    oldProcessor.shutdown();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Processor shutdown completed, install new export processor");
    }
    // Drop mastership so the data sources can be rebound to the new processor.
    generation.unacceptMastership();
    if (exportLog.isDebugEnabled()) {
        exportLog.debug("Existing export datasources unassigned.");
    }
    try {
        ExportDataProcessor newProcessor = getNewProcessorWithProcessConfigSet(config);
        generation.initializeGenerationFromCatalog(catalogContext, connectors, newProcessor, m_hostId, partitions, true);
        // Reset ack mailboxes before handing the generation to the new processor.
        for (Pair<Integer, Integer> partition : partitions) {
            generation.updateAckMailboxes(partition.getFirst(), null);
        }
        newProcessor.setExportGeneration(generation);
        if (m_startPolling && !config.isEmpty()) {
            newProcessor.startPolling();
        }
        m_processor.getAndSet(newProcessor);
        newProcessor.readyForData();
    } catch (Exception crash) {
        VoltDB.crashLocalVoltDB("Error creating next export processor", true, crash);
    }
    // Re-claim mastership for every partition this host was master of.
    for (int partitionId : m_masterOfPartitions) {
        generation.acceptMastership(partitionId);
    }
}
remove and install new processor
31,458
/**
 * Computes and caches the content-determinism message for this node.
 * Checks, in order: an already-cached message, the subquery's message, and
 * finally each column expression's message.  Returns null when everything is
 * content-deterministic.
 */
public String calculateContentDeterminismMessage() {
    final String cached = getContentDeterminismMessage();
    if (cached != null) {
        return cached;
    }
    if (m_subquery != null) {
        // Delegate to the subquery and cache whatever it reports.
        updateContentDeterminismMessage(m_subquery.calculateContentDeterminismMessage());
        return getContentDeterminismMessage();
    }
    if (m_columns != null) {
        for (AbstractExpression columnExpr : m_columns.values()) {
            final String msg = columnExpr.getContentDeterminismMessage();
            if (msg != null) {
                updateContentDeterminismMessage(msg);
                return msg;
            }
        }
    }
    return null;
}
Return the content determinism string of the subquery if there is one .
31,459
/**
 * Returns the position of the given partition-by expression within the
 * order-by list, or -1 when it does not appear.  Used to rationalize
 * partition-by against order-by expressions.
 */
public int getSortIndexOfOrderByExpression(AbstractExpression partitionByExpression) {
    int position = 0;
    for (AbstractExpression orderBy : m_orderByExpressions) {
        if (orderBy.equals(partitionByExpression)) {
            return position;
        }
        position++;
    }
    return -1;
}
Return the index of the given partition by expression in the order by list . This is used when trying to rationalize partition by and order by expressions .
31,460
/**
 * Rewrites the SELECT statement to read from a matching materialized view
 * when one was found (m_mvi != null); otherwise recurses into any subquery
 * scans, attempting the same rewrite on each.
 *
 * @return true when any rewrite was applied
 */
private boolean rewriteSelectStmt() {
    if (m_mvi != null) {
        final Table view = m_mvi.getDest();
        final String viewName = view.getTypeName();
        // Point the projection at the view and remap query columns to view columns.
        m_selectStmt.getFinalProjectionSchema()
                .resetTableName(viewName, viewName)
                .toTVEAndFixColumns(m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx.entrySet().stream()
                        .collect(Collectors.toMap(kv -> kv.getKey().getFirst(), Map.Entry::getValue)));
        final Map<Integer, Pair<String, Integer>> colSubIndx =
                m_QueryColumnNameAndIndx_to_MVColumnNameAndIndx.entrySet().stream()
                        .collect(Collectors.toMap(kv -> kv.getKey().getSecond(), Map.Entry::getValue));
        ParsedSelectStmt.updateTableNames(m_selectStmt.m_aggResultColumns, viewName);
        ParsedSelectStmt.fixColumns(m_selectStmt.m_aggResultColumns, colSubIndx);
        ParsedSelectStmt.updateTableNames(m_selectStmt.m_displayColumns, viewName);
        ParsedSelectStmt.fixColumns(m_selectStmt.m_displayColumns, colSubIndx);
        m_selectStmt.rewriteAsMV(view);
        m_mvi = null; // consumed: prevents a second rewrite
        return true;
    } else {
        // reduce (not anyMatch) is deliberate: rewriteTableAlias must run on
        // EVERY subquery scan, and anyMatch would short-circuit.  Seeding the
        // reduction with false fixes a NoSuchElementException the original
        // hit via Optional.get() when allScans() was empty.
        return m_selectStmt.allScans().stream()
                .map(scan -> scan instanceof StmtSubqueryScan && rewriteTableAlias((StmtSubqueryScan) scan))
                .reduce(false, Boolean::logicalOr);
    }
}
Try to rewrite SELECT stmt if there is a matching materialized view .
31,461
/**
 * Attempts the materialized-view rewrite on a subquery scan's statement.
 *
 * @return true when the subquery was a SELECT and was rewritten
 */
private static boolean rewriteTableAlias(StmtSubqueryScan scan) {
    final AbstractParsedStmt subquery = scan.getSubqueryStmt();
    if (!(subquery instanceof ParsedSelectStmt)) {
        return false;
    }
    return new MVQueryRewriter((ParsedSelectStmt) subquery).rewrite();
}
Checks for any opportunity to rewrite sub - queries
31,462
/**
 * Collects the column index of every TupleValueExpression reachable from the
 * given expression into {@code accum}, which is also returned for chaining.
 */
private static List<Integer> extractTVEIndices(AbstractExpression e, List<Integer> accum) {
    if (e == null) {
        return accum;
    }
    if (e instanceof TupleValueExpression) {
        accum.add(((TupleValueExpression) e).getColumnIndex());
        return accum;
    }
    // Recurse left, then right, then any function/operator arguments.
    extractTVEIndices(e.getLeft(), accum);
    extractTVEIndices(e.getRight(), accum);
    if (e.getArgs() != null) {
        for (AbstractExpression arg : e.getArgs()) {
            extractTVEIndices(arg, accum);
        }
    }
    return accum;
}
Helper method to extract all TVE column indices from an expression .
31,463
/**
 * Applies the matching rules between this SELECT statement and a
 * materialized view: the filter predicates, group-by tables and group-by
 * columns must all line up.
 *
 * @return the query-column to view-column mapping on a match, else null
 */
private Map<Pair<String, Integer>, Pair<String, Integer>> gbyMatches(MaterializedViewInfo mv) {
    final FilterMatcher filter = new FilterMatcher(
            m_selectStmt.m_joinTree.getJoinExpression(), predicate_of(mv));
    final boolean matched = filter.match() && gbyTablesEqual(mv) && gbyColumnsMatch(mv);
    return matched ? getViewColumnMaps(mv) : null;
}
Apply matching rules of SELECT stmt against a materialized view and gives back column relationship between the two .
31,464
/**
 * Collects every materialized view defined on the given tables, mapped to
 * its destination (view) table.
 */
private static Map<MaterializedViewInfo, Table> getMviAndViews(List<Table> tbls) {
    return tbls.stream()
            // CatalogMap is not Iterable, so wrap its iterator for streaming.
            .flatMap(tbl -> StreamSupport.stream(
                    ((Iterable<MaterializedViewInfo>) () -> tbl.getViews().iterator()).spliterator(), false))
            .collect(Collectors.toMap(mv -> mv, MaterializedViewInfo::getDest));
}
returns all materialized view info = > view table from table list
31,465
/**
 * Returns a deep copy of the expression tree in which every
 * ParameterValueExpression is replaced by a clone of its original constant
 * value.  The input tree is left untouched.
 */
private static AbstractExpression transformExpressionRidofPVE(AbstractExpression src) {
    // Transform children first (post-order).
    final AbstractExpression newLeft =
            src.getLeft() == null ? null : transformExpressionRidofPVE(src.getLeft());
    final AbstractExpression newRight =
            src.getRight() == null ? null : transformExpressionRidofPVE(src.getRight());
    final AbstractExpression dst;
    if (src instanceof ParameterValueExpression) {
        // PVEs are parameter placeholders; substitute the constant they stand for.
        assert (((ParameterValueExpression) src).getOriginalValue() != null);
        dst = ((ParameterValueExpression) src).getOriginalValue().clone();
    } else {
        dst = src.clone();
    }
    dst.setLeft(newLeft);
    dst.setRight(newRight);
    return dst;
}
For scope of ENG - 2878 caching would not cause this trouble because parameter
31,466
/**
 * Deserializes the view's group-by expressions from their JSON form,
 * returning an empty list when the JSON cannot be parsed.
 */
private static List<AbstractExpression> getGbyExpressions(MaterializedViewInfo mv) {
    try {
        return AbstractExpression.fromJSONArrayString(mv.getGroupbyexpressionsjson(), null);
    } catch (JSONException e) {
        // Treat unparsable JSON as "no group-by expressions".
        return new ArrayList<>();
    }
}
Get group - by expression
31,467
/**
 * Hacky check for the one n-partition transaction supported here: a
 * &#64;BalancePartitions sysproc whose second parameter is not 1.
 */
private boolean isNpTxn(Iv2InitiateTaskMessage msg) {
    final String procName = msg.getStoredProcedureName();
    if (!procName.startsWith("@")) {
        return false;
    }
    return procName.equalsIgnoreCase("@BalancePartitions")
            && (byte) msg.getParameters()[1] != 1;
}
Hacky way to only run
31,468
/**
 * Extracts the source and destination partition ids from the first pair of a
 * &#64;BalancePartitions request.  Returns null when the request JSON cannot
 * be parsed.
 */
private Set<Integer> getBalancePartitions(Iv2InitiateTaskMessage msg) {
    try {
        final JSONObject requestJson = new JSONObject((String) msg.getParameters()[0]);
        final BalancePartitionsRequest request = new BalancePartitionsRequest(requestJson);
        return Sets.newHashSet(request.partitionPairs.get(0).srcPartition,
                request.partitionPairs.get(0).destPartition);
    } catch (JSONException e) {
        hostLog.warn("Unable to determine partitions for @BalancePartitions", e);
        return null;
    }
}
Extract the two involved partitions from the
31,469
/**
 * Handles a site's response to an initiate task.  The MPI interposes here so
 * it can observe every response: it resolves duplicate counters for
 * every-site sysprocs, redirects misrouted messages during leader migration,
 * advances the repair-log truncation handle one committed txn behind, and
 * records a matching CompleteTransactionMessage in the repair log.
 */
public void handleInitiateResponseMessage(InitiateResponseMessage message) {
    final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.MPI);
    if (traceLog != null) {
        traceLog.add(() -> VoltTrace.endAsync("initmp", message.getTxnId()));
    }
    DuplicateCounter counter = m_duplicateCounters.get(message.getTxnId());
    if (counter != null && message.isMisrouted()) {
        // The target partition leader moved; re-send the open message to the
        // migrated leader if known, otherwise back to the original source.
        tmLog.info("The message on the partition is misrouted. TxnID: " + TxnEgo.txnIdToString(message.getTxnId()));
        Long newLeader = m_leaderMigrationMap.get(message.m_sourceHSId);
        if (newLeader != null) {
            counter.updateReplica(message.m_sourceHSId, newLeader);
            m_leaderMigrationMap.remove(message.m_sourceHSId);
            m_mailbox.send(newLeader, counter.getOpenMessage());
        } else {
            m_mailbox.send(message.m_sourceHSId, counter.getOpenMessage());
        }
        return;
    }
    if (counter != null) {
        // Every-site sysproc: collect responses from all sites and compare.
        int result = counter.offer(message);
        if (result == DuplicateCounter.DONE) {
            m_duplicateCounters.remove(message.getTxnId());
            if (message.shouldCommit() && message.haveSentMpFragment()) {
                // Advance the truncation handle one committed txn behind.
                m_repairLogTruncationHandle = m_repairLogAwaitingCommit;
                m_repairLogAwaitingCommit = message.getTxnId();
            }
            m_outstandingTxns.remove(message.getTxnId());
            m_mailbox.send(counter.m_destinationId, message);
        } else if (result == DuplicateCounter.MISMATCH) {
            VoltDB.crashLocalVoltDB("HASH MISMATCH running every-site system procedure.", true, null);
        } else if (result == DuplicateCounter.ABORT) {
            VoltDB.crashLocalVoltDB("PARTIAL ROLLBACK/ABORT running every-site system procedure.", true, null);
        }
    } else {
        if (message.shouldCommit() && message.haveSentMpFragment()) {
            m_repairLogTruncationHandle = m_repairLogAwaitingCommit;
            m_repairLogAwaitingCommit = message.getTxnId();
        }
        MpTransactionState txn = (MpTransactionState) m_outstandingTxns.remove(message.getTxnId());
        assert (txn != null);
        // Forward the response to the initiator, then record a matching
        // complete-transaction message in the repair log.
        m_mailbox.send(message.getInitiatorHSId(), message);
        CompleteTransactionMessage ctm = new CompleteTransactionMessage(m_mailbox.getHSId(), message.m_sourceHSId,
                message.getTxnId(), message.isReadOnly(), 0, !message.shouldCommit(), false, false, false,
                txn.isNPartTxn(), message.m_isFromNonRestartableSysproc, false);
        ctm.setTruncationHandle(m_repairLogTruncationHandle);
        ((MpInitiatorMailbox) m_mailbox).deliverToRepairLog(ctm);
    }
}
see all of these messages and control their transmission .
31,470
/**
 * Injects an end-of-log task into the transaction task queue; when it runs
 * it broadcasts MPI end-of-log messages to all partition initiators.
 */
public void handleEOLMessage() {
    final Iv2EndOfLogMessage msg = new Iv2EndOfLogMessage(m_partitionId);
    final MPIEndOfLogTransactionState txnState = new MPIEndOfLogTransactionState(msg);
    m_pendingTasks.offer(new MPIEndOfLogTask(m_mailbox, m_pendingTasks, txnState, m_iv2Masters));
}
Inject a task into the transaction task queue to flush it . When it executes it will send out MPI end of log messages to all partition initiators .
31,471
/**
 * Loads the pro-edition class implementing n-partition transactions.
 * Missing-class conditions are logged only on pro builds; community builds
 * silently ignore the absence.
 */
private static ProClass<MpProcedureTask> loadNpProcedureTaskClass() {
    ProClass<MpProcedureTask> proClass = ProClass.<MpProcedureTask>load(
            "org.voltdb.iv2.NpProcedureTask", "N-Partition",
            MiscUtils.isPro() ? ProClass.HANDLER_LOG : ProClass.HANDLER_IGNORE);
    return proClass.errorHandler(tmLog::error)
            .useConstructorFor(Mailbox.class, String.class, TransactionTaskQueue.class,
                    Iv2InitiateTaskMessage.class, Map.class, long.class, boolean.class, int.class);
}
Load the pro class for n - partition transactions .
31,472
/**
 * Inserts a duplicate counter, crashing the cluster if the key already maps
 * to a counter — a plain put() would silently clobber a colliding entry from
 * a different transaction.
 */
void safeAddToDuplicateCounterMap(long dpKey, DuplicateCounter counter) {
    final DuplicateCounter previous = m_duplicateCounters.get(dpKey);
    if (previous == null) {
        m_duplicateCounters.put(dpKey, counter);
        return;
    }
    previous.logWithCollidingDuplicateCounters(counter);
    VoltDB.crashGlobalVoltDB("DUPLICATE COUNTER MISMATCH: two duplicate counter keys collided.", true, null);
}
Just using put on the dup counter map is unsafe . It won t detect the case where keys collide from two different transactions .
31,473
/**
 * INFORMATION_SCHEMA.ADMINISTRABLE_ROLE_AUTHORIZATIONS: the roles the
 * current grantee may grant to others.  An admin user can grant every role,
 * so rows are produced only for admin sessions.  On first call only the
 * blank table structure is created and returned for registration.
 */
Table ADMINISTRABLE_ROLE_AUTHORIZATIONS() {
    Table t = sysTables[ADMINISTRABLE_ROLE_AUTHORIZATIONS];
    if (t == null) {
        // First call: build columns and primary key only, no data.
        t = createBlankTable(sysTableHsqlNames[ADMINISTRABLE_ROLE_AUTHORIZATIONS]);
        addColumn(t, "GRANTEE", SQL_IDENTIFIER);
        addColumn(t, "ROLE_NAME", SQL_IDENTIFIER);
        addColumn(t, "IS_GRANTABLE", SQL_IDENTIFIER);
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(
                sysTableHsqlNames[ADMINISTRABLE_ROLE_AUTHORIZATIONS].name, false, SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[]{ 0, 1, 2 }, false);
        return t;
    }
    if (session.isAdmin()) {
        // Admins can grant all roles; populate with every role as grantable.
        insertRoles(t, session.getGrantee(), true);
    }
    return t;
}
Returns roles that are grantable by an admin user which means all the roles
31,474
/**
 * INFORMATION_SCHEMA.ROUTINE_ROUTINE_USAGE: one row for each routine
 * (FUNCTION or PROCEDURE) referenced by each specific routine accessible to
 * the current grantee.  On first call only the blank table structure is
 * created and returned for registration.
 */
Table ROUTINE_ROUTINE_USAGE() {
    Table t = sysTables[ROUTINE_ROUTINE_USAGE];
    if (t == null) {
        t = createBlankTable(sysTableHsqlNames[ROUTINE_ROUTINE_USAGE]);
        addColumn(t, "SPECIFIC_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "SPECIFIC_SCHEMA", SQL_IDENTIFIER);
        addColumn(t, "SPECIFIC_NAME", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_CATALOG", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_SCHEMA", SQL_IDENTIFIER);
        addColumn(t, "ROUTINE_NAME", SQL_IDENTIFIER);
        HsqlName name = HsqlNameManager.newInfoSchemaObjectName(sysTableHsqlNames[ROUTINE_ROUTINE_USAGE].name, false, SchemaObject.INDEX);
        t.createPrimaryKey(name, new int[]{ 0, 1, 2, 3, 4, 5 }, false);
        return t;
    }
    // Column indices into the result rows.
    final int specific_catalog = 0;
    final int specific_schema = 1;
    final int specific_name = 2;
    final int routine_catalog = 3;
    final int routine_schema = 4;
    final int routine_name = 5;
    PersistentStore store = database.persistentStoreCollection.getStore(t);
    Iterator it;
    Object[] row;
    it = database.schemaManager.databaseObjectIterator(SchemaObject.ROUTINE);
    while (it.hasNext()) {
        RoutineSchema routine = (RoutineSchema) it.next();
        if (!session.getGrantee().isAccessible(routine)) {
            continue; // skip routines the session may not see
        }
        Routine[] specifics = routine.getSpecificRoutines();
        for (int m = 0; m < specifics.length; m++) {
            // Walk every object the specific routine references...
            OrderedHashSet set = specifics[m].getReferences();
            for (int i = 0; i < set.size(); i++) {
                HsqlName refName = (HsqlName) set.get(i);
                // ...keeping only referenced FUNCTIONs and PROCEDUREs.
                if (refName.type != SchemaObject.FUNCTION && refName.type != SchemaObject.PROCEDURE) {
                    continue;
                }
                if (!session.getGrantee().isAccessible(refName)) {
                    continue;
                }
                row = t.getEmptyRowData();
                row[specific_catalog] = database.getCatalogName().name;
                row[specific_schema] = specifics[m].getSchemaName().name;
                row[specific_name] = specifics[m].getName().name;
                row[routine_catalog] = database.getCatalogName().name;
                row[routine_schema] = refName.schema.name;
                row[routine_name] = refName.name;
                try {
                    t.insertSys(store, row);
                } catch (HsqlException e) {
                    // Insert failures are ignored — presumably duplicate
                    // rows; NOTE(review): confirm this is the only cause.
                }
            }
        }
    }
    return t;
}
needs to provide list of specific referenced routines
31,475
/**
 * Called on catalog update when this exporting stream is dropped: marks the
 * source closed, cancels any outstanding poll, then asynchronously discards
 * the pending container, deletes the committed buffers and the .ad file, and
 * finally shuts the executor down.
 *
 * @return a future that completes when the on-disk state has been removed
 */
public ListenableFuture<?> closeAndDelete() {
    m_closed = true;
    m_ackMailboxRefs.set(null);
    m_mastershipAccepted.set(false);
    try {
        if (m_pollTask != null) {
            m_pollTask.setFuture(null);
        }
    } catch (RejectedExecutionException reex) {
        // Executor already torn down; nothing to cancel.
    }
    m_pollTask = null;
    return m_es.submit(new Runnable() {
        public void run() {
            try {
                // Discard any container still awaiting acknowledgment.
                AckingContainer ack = m_pendingContainer.getAndSet(null);
                if (ack != null) {
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Discard pending container, lastSeqNo: " + ack.getLastSeqNo());
                    }
                    ack.internalDiscard();
                }
                m_committedBuffers.closeAndDelete();
                m_adFile.delete();
            } catch (IOException e) {
                exportLog.rateLimitedLog(60, Level.WARN, e, "Error closing commit buffers");
            } finally {
                // Always shut the executor down, even after I/O failures.
                m_es.shutdown();
            }
        }
    });
}
This is called on updateCatalog when an exporting stream is dropped .
31,476
/**
 * Installs a pending acking container.  Needs to be thread-safe: both the
 * EDS executor (export decoder) and the site thread touch
 * m_pendingContainer.  If the source has already been closed the stale
 * container is discarded instead of being installed.
 *
 * @throws IllegalStateException if a container is already pending
 */
public void setPendingContainer(AckingContainer container) {
    // Bug fix: the original called checkNotNull on a boolean expression,
    // which can never fail; the intent — per the message — is to assert
    // that no container is currently pending.
    Preconditions.checkState(m_pendingContainer.get() == null, "Pending container must be null.");
    if (m_closed) {
        exportLog.info("Discarding stale pending container");
        container.internalDiscard();
    } else {
        m_pendingContainer.set(container);
    }
}
Needs to be thread - safe EDS executor export decoder and site thread both touch m_pendingContainer .
31,477
/**
 * Entry point for acknowledgments arriving from remote entities.  Runs on
 * the source's executor; the ack is applied only while this source has NOT
 * accepted mastership (replicas track the master's progress) and the
 * executor is still live.
 */
public void remoteAck(final long seq) {
    m_es.execute(new Runnable() {
        public void run() {
            try {
                if (!m_es.isShutdown() && !m_mastershipAccepted.get()) {
                    setCommittedSeqNo(seq);
                    ackImpl(seq);
                }
            } catch (Exception e) {
                exportLog.error("Error acking export buffer", e);
            } catch (Error e) {
                // Errors are fatal: take the node down rather than risk
                // inconsistent export state.
                VoltDB.crashLocalVoltDB("Error acking export buffer", true, e);
            }
        }
    });
}
Entry point for receiving acknowledgments from remote entities .
31,478
/**
 * When this source is no longer in the catalog and all buffered data is
 * gone, cancel any outstanding poll and notify the generation that the
 * source is fully drained.
 */
private void handleDrainedSource() throws IOException {
    if (inCatalog() || !m_committedBuffers.isEmpty()) {
        return; // still live, or still holding data
    }
    try {
        if (m_pollTask != null) {
            m_pollTask.setFuture(null);
        }
    } catch (RejectedExecutionException reex) {
        // Executor already shut down; nothing left to cancel.
    }
    m_pollTask = null;
    m_generation.onSourceDrained(m_partitionId, m_tableName);
}
Notify the generation when source is drained on an unused partition .
31,479
/**
 * Triggers execution of the mastership runnable on this source's executor.
 * No-ops when the runnable has not been set yet or mastership has already
 * been accepted; the compareAndSet inside the queued task guards against
 * concurrent acceptance.
 */
public synchronized void acceptMastership() {
    if (m_onMastership == null) {
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Mastership Runnable not yet set for table " + getTableName() + " partition " + getPartitionId());
        }
        return;
    }
    if (m_mastershipAccepted.get()) {
        if (exportLog.isDebugEnabled()) {
            exportLog.debug("Export table " + getTableName() + " mastership already accepted for partition " + getPartitionId());
        }
        return;
    }
    m_es.execute(new Runnable() {
        public void run() {
            try {
                // NOTE(review): the '||' makes this branch run whenever the
                // executor is live OR the source is still open — confirm
                // whether '&&' (both conditions) was intended.
                if (!m_es.isShutdown() || !m_closed) {
                    if (exportLog.isDebugEnabled()) {
                        exportLog.debug("Export table " + getTableName() + " accepting mastership for partition " + getPartitionId());
                    }
                    if (m_mastershipAccepted.compareAndSet(false, true)) {
                        // Forget outstanding gap-query responses and run the hook.
                        m_queryResponses.clear();
                        m_onMastership.run();
                    }
                }
            } catch (Exception e) {
                exportLog.error("Error in accepting mastership", e);
            }
        }
    });
}
Trigger an execution of the mastership runnable by the associated executor service
31,480
/**
 * Registers the task to run when this source is designated master.  For
 * run-everywhere sources, mastership is accepted immediately and the ack
 * mailbox reference is dropped.
 */
public void setOnMastership(Runnable toBeRunOnMastership) {
    Preconditions.checkNotNull(toBeRunOnMastership, "mastership runnable is null");
    m_onMastership = toBeRunOnMastership;
    if (!m_runEveryWhere) {
        return;
    }
    m_ackMailboxRefs.set(null);
    acceptMastership();
}
set the runnable task that is to be executed on mastership designation
31,481
/**
 * Answers a gap query from another host on this source's executor: reports
 * the highest sequence number of the local range containing gapStart, or
 * Long.MIN_VALUE when no such range exists.
 */
public void handleQueryMessage(final long senderHSId, long requestId, long gapStart) {
    m_es.execute(new Runnable() {
        public void run() {
            Pair<Long, Long> range = m_gapTracker.getRangeContaining(gapStart);
            long lastSeq = (range == null) ? Long.MIN_VALUE : range.getSecond();
            sendQueryResponse(senderHSId, requestId, lastSeq);
        }
    });
}
Query whether a master exists for the given partition if not try to promote the local data source .
31,482
/**
 * Resets the release/commit/poll bookkeeping after a rejoin or recover.  On
 * rejoin the current master tells us where to poll the next buffer, so we
 * advance past any gap at the head of the local tracker; on recover we
 * trust the initial sequence number supplied by the caller.
 */
private void resetStateInRejoinOrRecover(long initialSequenceNumber, boolean isRejoin) {
    if (isRejoin) {
        if (!m_gapTracker.isEmpty()) {
            // Everything before the first locally-tracked buffer counts as released.
            m_lastReleasedSeqNo = Math.max(m_lastReleasedSeqNo, m_gapTracker.getFirstSeqNo() - 1);
        }
    } else {
        m_lastReleasedSeqNo = Math.max(m_lastReleasedSeqNo, initialSequenceNumber);
    }
    // Committed/poll positions restart from the released high-water mark.
    m_committedSeqNo = m_lastReleasedSeqNo;
    m_firstUnpolledSeqNo = m_lastReleasedSeqNo + 1;
    m_tuplesPending.set(m_gapTracker.sizeInSequence());
}
current master to tell us where to poll next buffer .
31,483
/**
 * Recovers the wall-clock creation time embedded in a transaction id by
 * stripping the counter and initiator-id bits and re-adding the VoltDB
 * epoch offset.
 */
public static Date getDateFromTransactionId(long txnId) {
    final long millisSinceEpoch = (txnId >> (COUNTER_BITS + INITIATORID_BITS)) + VOLT_EPOCH;
    return new Date(millisSinceEpoch);
}
Given a transaction id return the time of its creation by examining the embedded timestamp .
31,484
/**
 * Recovers the partition assignment of a lost host in the same placement
 * group for a rejoining host.  If a lost node in the topology matches this
 * host's placement group and its partition count equals this host's local
 * site count, those partitions are placed on the rejoining host, preserving
 * the partition-group layout; the topology is then written back to ZK.
 *
 * @return the recovered topology, or null when no matching host was found
 */
private AbstractTopology recoverPartitions(AbstractTopology topology, String haGroup, Set<Integer> recoverPartitions) {
    long version = topology.version;
    if (!recoverPartitions.isEmpty()) {
        // Requested partitions beyond the current highest id cannot be honored.
        if (Collections.max(recoverPartitions) > Collections.max(m_cartographer.getPartitions())) {
            recoverPartitions.clear();
        }
    }
    AbstractTopology recoveredTopo = AbstractTopology.mutateRecoverTopology(topology,
            m_messenger.getLiveHostIds(), m_messenger.getHostId(), haGroup, recoverPartitions);
    if (recoveredTopo == null) {
        return null;
    }
    // Publish only when the recovered assignment fills all local sites.
    List<Integer> partitions = Lists.newArrayList(recoveredTopo.getPartitionIdList(m_messenger.getHostId()));
    if (partitions != null && partitions.size() == m_catalogContext.getNodeSettings().getLocalSitesCount()) {
        TopologyZKUtils.updateTopologyToZK(m_messenger.getZK(), recoveredTopo);
    }
    if (version < recoveredTopo.version && !recoverPartitions.isEmpty()) {
        consoleLog.info("Partition placement layout has been restored for rejoining.");
    }
    return recoveredTopo;
}
recover the partition assignment from one of lost hosts in the same placement group for rejoin Use the placement group of the recovering host to find a matched host from the lost nodes in the topology If the partition count from the lost node is the same as the site count of the recovering host The partitions on the lost node will be placed on the recovering host . Partition group layout will be maintained . Topology will be updated on ZK if successful
31,485
/**
 * Blocks until mesh determination completes; if this node is still
 * mid-rejoin when another node fails, crash it to prevent the cluster from
 * locking up.
 *
 * @return true when the node was crashed (rejoin canceled)
 */
private boolean stopRejoiningHost() {
    try {
        m_meshDeterminationLatch.await();
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it,
        // so callers up the stack can still observe the interruption.
        Thread.currentThread().interrupt();
    }
    if (m_rejoining) {
        VoltDB.crashLocalVoltDB("Another node failed before this node could finish rejoining. "
                + "As a result, the rejoin operation has been canceled. Please try again.");
        return true;
    }
    return false;
}
If the current node hasn t finished rejoin when another node fails fail this node to prevent locking up .
31,486
/**
 * For every local single-partition initiator that is currently the
 * partition leader, claim export stream mastership so export work moves
 * back to the leader's node.
 */
private void checkExportStreamMastership() {
    for (Initiator initiator : m_iv2Initiators.values()) {
        if (initiator.getPartitionId() == MpInitiator.MP_INIT_PID) {
            continue; // the MP initiator owns no export streams
        }
        SpInitiator spInitiator = (SpInitiator) initiator;
        if (spInitiator.isLeader()) {
            ExportManager.instance().takeMastership(spInitiator.getPartitionId());
        }
    }
}
move back to partition leader s node .
31,487
/**
 * Schedules the daily system-info logging task to fire shortly after log4j
 * rolls its daily log file. The roll time is read from the private
 * "nextCheck" field of log4j's DailyRollingFileAppender via reflection —
 * fragile, since it depends on library internals (acknowledged in the
 * original notes as "not a reliable idea"). 30 seconds are added to the
 * computed initial delay; presumably DailyLogTask reschedules itself for
 * subsequent days — TODO confirm, not visible from here.
 */
void scheduleDailyLoggingWorkInNextCheckTime() {
    // Find the (last) DailyRollingFileAppender attached to the root logger.
    DailyRollingFileAppender dailyAppender = null;
    Enumeration<?> appenders = Logger.getRootLogger().getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = (Appender) appenders.nextElement();
        if (appender instanceof DailyRollingFileAppender) {
            dailyAppender = (DailyRollingFileAppender) appender;
        }
    }
    final DailyRollingFileAppender dailyRollingFileAppender = dailyAppender;

    // Expose the appender's private "nextCheck" field (its next roll time).
    Field field = null;
    if (dailyRollingFileAppender != null) {
        try {
            field = dailyRollingFileAppender.getClass().getDeclaredField("nextCheck");
            field.setAccessible(true);
        } catch (NoSuchFieldException e) {
            hostLog.error("Failed to set daily system info logging: " + e.getMessage());
        }
    }
    final Field nextCheckField = field;

    long nextCheck = System.currentTimeMillis();
    if (dailyRollingFileAppender != null && nextCheckField != null) {
        try {
            nextCheck = nextCheckField.getLong(dailyRollingFileAppender);
            // Delay = time until the roll, plus 30s of slack.
            scheduleWork(new DailyLogTask(), nextCheck - System.currentTimeMillis() + 30 * 1000, 0,
                    TimeUnit.MILLISECONDS);
        } catch (Exception e) {
            hostLog.error("Failed to set daily system info logging: " + e.getMessage());
        }
    }
}
Gets the next check time from a private member of the log4j library via reflection — not a reliable approach, since it depends on library internals. Adds 30 seconds for the initial delay and uses a periodic thread to schedule the daily logging work with that delay.
31,488
/**
 * Schedules all of the periodic background tasks: stats notifications,
 * file-limit counter checks, system stats sampling at several cadences,
 * export stream mastership checks, optional enterprise maintenance hooks,
 * and the GC inspector.
 */
private void schedulePeriodicWorks() {
    // Push stats notifications on the StatsManager's polling cadence.
    m_periodicWorks.add(scheduleWork(() -> {
        if (m_statsManager != null) {
            m_statsManager.sendNotification();
        }
    }, 0, StatsManager.POLL_INTERVAL, TimeUnit.MILLISECONDS));

    // Every 10s, hand the file-limit counter check off to the shared
    // ScheduledExecutorService, if it is still alive.
    m_periodicWorks.add(scheduleWork(() -> {
        ScheduledExecutorService es = VoltDB.instance().getSES(false);
        if (es != null && !es.isShutdown()) {
            es.submit(() -> {
                long timestamp = System.currentTimeMillis();
                m_flc.checkCounter(timestamp);
            });
        }
    }, 0, 10, TimeUnit.SECONDS));

    // System stats sampling at three cadences with different sampling flags
    // (the meaning of the booleans is defined by SystemStatsCollector).
    m_periodicWorks.add(scheduleWork(
            () -> SystemStatsCollector.asyncSampleSystemNow(false, false),
            0, 5, TimeUnit.SECONDS));
    m_periodicWorks.add(scheduleWork(
            () -> SystemStatsCollector.asyncSampleSystemNow(true, false),
            0, 1, TimeUnit.MINUTES));
    m_periodicWorks.add(scheduleWork(
            () -> SystemStatsCollector.asyncSampleSystemNow(true, true),
            0, 6, TimeUnit.MINUTES));

    // Re-assert export stream mastership on partition leaders once a minute.
    m_periodicWorks.add(scheduleWork(this::checkExportStreamMastership,
            0, 1, TimeUnit.MINUTES));

    // Enterprise-only maintenance hooks, when available.
    EnterpriseMaintenance em = EnterpriseMaintenance.get();
    if (em != null) {
        em.setupMaintenaceTasks();
    }

    GCInspector.instance.start(m_periodicPriorityWorkThread, m_gcStats);
}
Schedules all of the periodic background tasks.
31,489
/**
 * A host is eligible to be leader when partition 0 lives on it, or when it
 * shares a partition group with a host carrying partition 0. The group
 * containing partition 0 can never be removed by an elastic shrink, so
 * leadership anchored there is stable.
 *
 * @param partitions          partitions assigned to this host
 * @param partitionGroupPeers host ids in this host's partition group
 * @param topology            the cluster topology
 * @return true if this host may act as leader
 */
private boolean determineIfEligibleAsLeader(Collection<Integer> partitions,
        Set<Integer> partitionGroupPeers, AbstractTopology topology) {
    // Directly hosting partition 0 is sufficient.
    if (partitions.contains(Integer.valueOf(0))) {
        return true;
    }
    // Otherwise: eligible if any host carrying partition 0 is a group peer.
    for (Integer hostWithPartitionZero : topology.getHostIdList(0)) {
        if (partitionGroupPeers.contains(hostWithPartitionZero)) {
            return true;
        }
    }
    return false;
}
This host can be a leader if partition 0 is on it, or if it is in the same partition group as a node that has partition 0. This works because the partition group containing partition 0 can never be removed by an elastic remove.
31,490
/**
 * Start all the sites' event loops. That's it.
 *
 * If a restore agent exists, it drives snapshot restore / replay; otherwise
 * completion is reported immediately. If this node is joining a cluster, the
 * join is started here and any failure crashes the local node.
 */
public void run() {
    if (m_restoreAgent != null) {
        // Start the restore process.
        m_restoreAgent.restore();
    }
    else {
        // Nothing to restore: report both restore and replay as complete.
        onSnapshotRestoreCompletion();
        onReplayCompletion(Long.MIN_VALUE, m_iv2InitiatorStartingTxnIds);
    }
    if (m_joinCoordinator != null) {
        try {
            m_statusTracker.set(NodeState.REJOINING);
            // startJoin returning false is treated the same as an exception:
            // this node cannot continue without joining.
            if (!m_joinCoordinator.startJoin(m_catalogContext.database)) {
                VoltDB.crashLocalVoltDB("Failed to join the cluster", true, null);
            }
        } catch (Exception e) {
            VoltDB.crashLocalVoltDB("Failed to join the cluster", true, e);
        }
    }
    m_isRunning = true;
}
Start all of the sites' event loops. That's it.
31,491
/**
 * Deletes the temporary catalog jar from the config directory, if present.
 * Best effort: a failed delete is silently ignored.
 */
public void cleanUpTempCatalogJar() {
    File configDir = getConfigDirectory();
    if (configDir.exists()) {
        File staleTempJar = new VoltFile(configDir.getPath(), InMemoryJarfile.TMP_CATALOG_JAR_FILENAME);
        if (staleTempJar.exists()) {
            // Return value of delete() intentionally ignored (best effort).
            staleTempJar.delete();
        }
    }
}
Deletes the temporary catalog jar file from the config directory, if it exists.
31,492
/**
 * Shuts down all initiators, iterating the map in descending key order.
 * Shutdown order matters: killing SP sites first may risk the MP site
 * waiting forever, so the descending traversal is deliberate.
 */
private void shutdownInitiators() {
    if (m_iv2Initiators == null) {
        return;
    }
    for (Initiator initiator : m_iv2Initiators.descendingMap().values()) {
        initiator.shutdown();
    }
}
Shutdown order matters: killing the SP sites first may risk the MP site waiting forever, hence the descending iteration order.
31,493
/**
 * Debugging function — writes a MIME multipart record of the current state
 * of the system to the given stream. Currently only a ClientInterface
 * section is emitted.
 *
 * @param out destination stream; not closed by this method
 */
public void createRuntimeReport(PrintStream out) {
    // MIME headers, then the first (and only) report section.
    out.print("MIME-Version: 1.0\n");
    out.print("Content-type: multipart/mixed; boundary=\"reportsection\"");
    out.print("\n\n--reportsection\nContent-Type: text/plain\n\nClientInterface Report\n");
    if (m_clientInterface != null) {
        out.print(m_clientInterface.toString() + "\n");
    }
}
Debugging function - creates a record of the current state of the system .
31,494
/**
 * Initialize the DR producer so that any binary log generated during recover
 * will be queued. This does NOT open the DR port; that happens after command
 * log replay finishes.
 */
private void initializeDRProducer() {
    try {
        if (m_producerDRGateway != null) {
            // Reach global agreement first, then wire each initiator's
            // DR gateway, then finalize.
            m_producerDRGateway.startAndWaitForGlobalAgreement();
            for (Initiator iv2init : m_iv2Initiators.values()) {
                iv2init.initDRGateway(m_config.m_startAction,
                        m_producerDRGateway,
                        isLowestSiteId(iv2init));
            }
            m_producerDRGateway.completeInitialization();
        }
    } catch (Exception ex) {
        // Port conflicts are a plausible cause; dump ports in use to help
        // diagnose before crashing the node.
        CoreUtils.printPortsInUse(hostLog);
        VoltDB.crashLocalVoltDB("Failed to initialize DR producer", false, ex);
    }
}
Initializes the DR producer so that any binary log generated during recover will be queued. This does NOT open the DR port; that happens after command log replay finishes.
31,495
/**
 * Compute the minimum recommended heap requirement for a node with the given
 * schema and cluster configuration. Units are presumably MB — not stated
 * here; confirm against the sizing documentation.
 *
 * NOTE(review): per the adjacent source note, this formula is mirrored
 * elsewhere; any change here should be reflected there and vice versa.
 *
 * @param tableCount   number of tables in the catalog
 * @param sitesPerHost number of execution sites on this host
 * @param kfactor      replication factor; values > 0 add rejoin headroom
 * @return the minimum heap requirement
 */
static public long computeMinimumHeapRqt(int tableCount, int sitesPerHost, int kfactor) {
    // Fixed baseline for the server itself.
    long baseRqt = 384;
    // Per-table overhead. Fix: multiply in long (10L) so a very large
    // tableCount cannot overflow int arithmetic before widening — the
    // original computed 10 * tableCount entirely in int.
    long tableRqt = 10L * tableCount;
    // K-safe clusters reserve extra per-site headroom for rejoin buffering;
    // without replication there is no rejoin cost. Also widened to long.
    long rejoinRqt = (kfactor > 0) ? 128L * sitesPerHost : 0;
    return baseRqt + tableRqt + rejoinRqt;
}
This formula is duplicated elsewhere (presumably in the sizing documentation or a companion tool); any changes there should be reflected here, and vice versa.
31,496
/**
 * Marks every uncommitted action of the given session in this chain as
 * prepared — the first phase of a two-phase commit.
 *
 * @param session the session entering the prepare phase
 */
synchronized void prepareCommit(Session session) {
    // 'this' is never null, so a for-loop over the chain is equivalent
    // to the original do-while traversal.
    for (RowActionBase action = this; action != null; action = action.next) {
        if (action.session == session && action.commitTimestamp == 0) {
            action.prepared = true;
        }
    }
}
Marks this session's uncommitted actions as prepared — the first phase of a two-phase commit.
31,497
/**
 * Rolls back the given session's uncommitted actions whose action timestamp
 * is at or after the given timestamp (or is still zero). Rolled-back actions
 * are stamped with the session's current action timestamp and unprepared.
 *
 * @param session   the session being rolled back
 * @param timestamp rollback boundary; earlier completed actions survive
 */
synchronized void rollback(Session session, long timestamp) {
    for (RowActionBase action = this; action != null; action = action.next) {
        // Only this session's still-uncommitted actions are candidates.
        if (action.session != session || action.commitTimestamp != 0) {
            continue;
        }
        if (action.actionTimestamp >= timestamp || action.actionTimestamp == 0) {
            action.commitTimestamp = session.actionTimestamp;
            action.rolledback = true;
            action.prepared = false;
        }
    }
}
Rolls back the given session's actions at or after the given timestamp (including actions that have no timestamp yet).
31,498
/**
 * Returns the type of the commit performed at the given timestamp, or
 * ACTION_NONE if there was none.
 *
 * Scans the entire chain without early exit: if several actions share the
 * timestamp, the last one in chain order wins, matching the original
 * traversal behavior.
 *
 * @param timestamp the commit timestamp to look up
 * @return the matching action type, or ACTION_NONE
 */
synchronized int getCommitType(long timestamp) {
    int commitType = ACTION_NONE;
    for (RowActionBase action = this; action != null; action = action.next) {
        if (action.commitTimestamp == timestamp) {
            commitType = action.type;
        }
    }
    return commitType;
}
Returns the type of the commit performed at the given timestamp, or ACTION_NONE if there was none.
31,499
/**
 * Decides whether the given session may commit its change to this row.
 * Returns false if another session has prepared a conflicting action, or has
 * already committed one at or after this session's reference timestamp.
 *
 * @param session the session attempting to commit
 * @param set     out-parameter: collects other sessions that still hold
 *                uncommitted actions on this row
 * @return true if no other session's commit supersedes this session's
 */
synchronized boolean canCommit(Session session, OrderedHashSet set) {
    RowActionBase action;
    long timestamp = session.transactionTimestamp;
    long commitTimestamp = 0;
    final boolean readCommitted = session.isolationMode == SessionInterface.TX_READ_COMMITTED;

    action = this;

    if (readCommitted) {
        // Under READ COMMITTED, compare against this session's own latest
        // uncommitted action timestamp rather than the transaction start.
        do {
            if (action.session == session) {
                if (action.commitTimestamp == 0) {
                    timestamp = action.actionTimestamp;
                }
            }
            action = action.next;
        } while (action != null);

        action = this;
    }

    do {
        // Skip rolled-back and no-op entries in the chain.
        if (action.rolledback || action.type == ACTION_NONE) {
            action = action.next;
            continue;
        }
        if (action.session != session) {
            if (action.prepared) {
                // Another session is mid two-phase commit on this row:
                // cannot commit now.
                return false;
            }
            if (action.commitTimestamp == 0 && action.actionTimestamp != 0) {
                // Live uncommitted action by another session — report it.
                set.add(action.session);
            } else if (action.commitTimestamp > commitTimestamp) {
                // Track the newest commit made by any other session.
                commitTimestamp = action.commitTimestamp;
            }
        }
        action = action.next;
    } while (action != null);

    // Commit allowed only if every other session's commit predates ours.
    return commitTimestamp < timestamp;
}
Returns false if another session has prepared, or has already committed, a conflicting change to the same row; returns true if this session can commit.