idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
32,000
/**
 * Start the host messenger. Starts the SocketJoiner; if this process is elected the
 * mesh coordinator (m_joiner.start() returns true), it brings up the network, creates
 * the local agreement site and its mailbox, starts the embedded agreement/ZK service,
 * connects a ZK client (60s timeout), creates the base ZK hierarchy, claims a host id
 * (the coordinator MUST get host id 0 or the process is crashed deliberately), and
 * publishes the cluster instance id (coordinator IP + timestamp) plus this host's
 * HostInfo under ephemeral ZK nodes. Non-coordinators skip the inner block and only
 * release zkInitBarrier.
 * NOTE(review): zkInterface is assumed to be "host:port" — the split(":") would
 * misbehave for IPv6 literals; confirm the config format upstream.
 * @throws Exception if the local ZooKeeper instance cannot be reached in time
 */
public void start ( ) throws Exception { CountDownLatch zkInitBarrier = new CountDownLatch ( 1 ) ; if ( m_joiner . start ( zkInitBarrier ) ) { m_network . start ( ) ; long agreementHSId = getHSIdForLocalSite ( AGREEMENT_SITE_ID ) ; HashSet < Long > agreementSites = new HashSet < Long > ( ) ; agreementSites . add ( agreementHSId ) ; SiteMailbox sm = new SiteMailbox ( this , agreementHSId ) ; createMailbox ( agreementHSId , sm ) ; m_agreementSite = new AgreementSite ( agreementHSId , agreementSites , 0 , sm , new InetSocketAddress ( m_config . zkInterface . split ( ":" ) [ 0 ] , Integer . parseInt ( m_config . zkInterface . split ( ":" ) [ 1 ] ) ) , m_config . backwardsTimeForgivenessWindow , m_failedHostsCallback ) ; m_agreementSite . start ( ) ; m_agreementSite . waitForRecovery ( ) ; m_zk = org . voltcore . zk . ZKUtil . getClient ( m_config . zkInterface , 60 * 1000 , VERBOTEN_THREADS ) ; if ( m_zk == null ) { throw new Exception ( "Timed out trying to connect local ZooKeeper instance" ) ; } CoreZK . createHierarchy ( m_zk ) ; final int selectedHostId = selectNewHostId ( m_config . coordinatorIp . toString ( ) ) ; if ( selectedHostId != 0 ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Selected host id for coordinator was not 0, " + selectedHostId , false , null ) ; } m_acceptor . accrue ( selectedHostId , m_acceptor . decorate ( new JSONObject ( ) , Optional . empty ( ) ) ) ; JSONObject instance_id = new JSONObject ( ) ; instance_id . put ( "coord" , ByteBuffer . wrap ( m_config . coordinatorIp . getAddress ( ) . getAddress ( ) ) . getInt ( ) ) ; instance_id . put ( "timestamp" , System . currentTimeMillis ( ) ) ; hostLog . debug ( "Cluster will have instance ID:\n" + instance_id . toString ( 4 ) ) ; byte [ ] payload = instance_id . toString ( 4 ) . getBytes ( "UTF-8" ) ; m_zk . create ( CoreZK . instance_id , payload , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; final HostInfo hostInfo = new HostInfo ( m_config . coordinatorIp . 
toString ( ) , m_config . group , m_config . localSitesCount , m_config . recoveredPartitions ) ; m_zk . create ( CoreZK . hosts_host + selectedHostId , hostInfo . toBytes ( ) , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL ) ; } zkInitBarrier . countDown ( ) ; }
Start the host messenger and connect to the leader or become the leader if necessary .
32,001
/**
 * Return the unique cluster instance id, lazily fetching it from ZooKeeper on the
 * first call and caching the parsed result in m_instanceId.
 * @throws RuntimeException (wrapping the cause) if the ZK node cannot be read/parsed
 */
public InstanceId getInstanceId() {
    if (m_instanceId != null) {
        return m_instanceId;
    }
    try {
        byte[] raw = m_zk.getData(CoreZK.instance_id, false, null);
        JSONObject parsed = new JSONObject(new String(raw, "UTF-8"));
        m_instanceId = new InstanceId(parsed.getInt("coord"), parsed.getLong("timestamp"));
    } catch (Exception e) {
        String msg = "Unable to get instance ID info from " + CoreZK.instance_id;
        hostLog.error(msg);
        throw new RuntimeException(msg, e);
    }
    return m_instanceId;
}
Get a unique ID for this cluster
32,002
/**
 * Serve a join request from a new node. Any node can serve a join; the new host id is
 * coordinated through ZK (selectNewHostId). Flow: consult the JoinAcceptor for a mesh
 * plea decision and answer it; on rejection just close the socket. On acceptance,
 * poll the (non-blocking) socket for a single "finished joining" byte for up to 120s
 * (sleeping 5ms between empty reads; EOF aborts the join), then wrap the socket in a
 * ForeignHost, register it, enable reads, and accrue the acceptor state. Any failure
 * in that section marks the host failed, removes it, detracts it from the acceptor,
 * and closes the socket. Finally the agreement site is asked to admit the new
 * agreement HSId with a 60s wait; a timeout reports the foreign host failed.
 * NOTE(review): the outer catch crashes the local process on any Throwable — join
 * handling is deliberately fail-fast.
 */
public void requestJoin ( SocketChannel socket , SSLEngine sslEngine , MessagingChannel messagingChannel , InetSocketAddress listeningAddress , JSONObject jo ) throws Exception { Integer hostId = selectNewHostId ( socket . socket ( ) . getInetAddress ( ) . getHostAddress ( ) ) ; prepSocketChannel ( socket ) ; ForeignHost fhost = null ; try { try { JoinAcceptor . PleaDecision decision = m_acceptor . considerMeshPlea ( m_zk , hostId , jo ) ; writeRequestJoinResponse ( hostId , decision , socket , messagingChannel ) ; if ( ! decision . accepted ) { socket . close ( ) ; return ; } ByteBuffer finishedJoining = ByteBuffer . allocate ( 1 ) ; socket . configureBlocking ( false ) ; long start = System . currentTimeMillis ( ) ; while ( finishedJoining . hasRemaining ( ) && System . currentTimeMillis ( ) - start < 120000 ) { int read = socket . read ( finishedJoining ) ; if ( read == - 1 ) { networkLog . info ( "New connection was unable to establish mesh" ) ; socket . close ( ) ; return ; } else if ( read < 1 ) { Thread . sleep ( 5 ) ; } } PicoNetwork picoNetwork = createPicoNetwork ( sslEngine , socket , false ) ; fhost = new ForeignHost ( this , hostId , socket , m_config . deadHostTimeout , listeningAddress , picoNetwork ) ; putForeignHost ( hostId , fhost ) ; fhost . enableRead ( VERBOTEN_THREADS ) ; m_acceptor . accrue ( hostId , jo ) ; } catch ( Exception e ) { networkLog . error ( "Error joining new node" , e ) ; addFailedHost ( hostId ) ; synchronized ( HostMessenger . this ) { removeForeignHost ( hostId ) ; } m_acceptor . detract ( m_zk , hostId ) ; socket . close ( ) ; return ; } long hsId = CoreUtils . getHSIdFromHostAndSite ( hostId , AGREEMENT_SITE_ID ) ; if ( ! m_agreementSite . requestJoin ( hsId ) . await ( 60 , TimeUnit . SECONDS ) ) { reportForeignHostFailed ( hostId ) ; } } catch ( Throwable e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "" , true , e ) ; } }
Any node can serve a request to join . The coordination of generating a new host id is done via ZK
32,003
/**
 * Called when SocketJoiner accepts a new inbound connection from the given host id.
 * Wraps the socket in a ForeignHost (dead-host timeout effectively disabled via
 * Integer.MAX_VALUE), registers it, and enables reads. Once every peer has
 * (m_secondaryConnections + 1) connections, marks the secondary mesh as complete.
 */
public void notifyOfConnection(int hostId, SocketChannel socket, SSLEngine sslEngine,
        InetSocketAddress listeningAddress) throws Exception {
    networkLog.info("Host " + getHostId() + " receives a new connection from host " + hostId);
    prepSocketChannel(socket);
    ForeignHost newHost = new ForeignHost(this, hostId, socket, Integer.MAX_VALUE,
            listeningAddress, createPicoNetwork(sslEngine, socket, true));
    putForeignHost(hostId, newHost);
    newHost.enableRead(VERBOTEN_THREADS);
    // Completion check: every peer must have its full complement of connections.
    boolean allPeersComplete = true;
    for (int peer : m_peers) {
        if (m_foreignHosts.get(peer).size() != (m_secondaryConnections + 1)) {
            allPeersComplete = false;
            break;
        }
    }
    if (allPeersComplete) {
        m_hasAllSecondaryConnectionCreated = true;
    }
}
SocketJoiner receives a request to create a new connection from the given host id ; create a new ForeignHost for that connection .
32,004
/**
 * Block until exactly expectedHosts hosts appear under CoreZK.hosts, i.e. until the
 * whole cluster mesh is built. Re-reads the children under a fresh watch each pass;
 * finding MORE hosts than expected (or any ZK error) crashes the local process.
 * @return host id -> HostInfo for every host, in host-id order
 */
public Map<Integer, HostInfo> waitForGroupJoin(int expectedHosts) {
    Map<Integer, HostInfo> discovered = Maps.newTreeMap();
    try {
        boolean meshComplete = false;
        while (!meshComplete) {
            ZKUtil.FutureWatcher watcher = new ZKUtil.FutureWatcher();
            final List<String> children = m_zk.getChildren(CoreZK.hosts, watcher);
            for (String child : children) {
                byte[] data = m_zk.getData(ZKUtil.joinZKPath(CoreZK.hosts, child), false, null);
                discovered.put(parseHostId(child), HostInfo.fromBytes(data));
            }
            final int found = children.size();
            if (found == expectedHosts) {
                meshComplete = true;
            } else if (found > expectedHosts) {
                org.voltdb.VoltDB.crashLocalVoltDB("Expected to find " + expectedHosts + " hosts in cluster at startup but found " + found + ". Terminating this host.", false, null);
            } else {
                // Too few hosts yet: block until the watch fires, then re-scan.
                watcher.get();
            }
        }
    } catch (Exception e) {
        org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e);
    }
    assert discovered.size() == expectedHosts;
    return discovered;
}
Wait until all the nodes have built a mesh .
32,005
/**
 * Resolve a host id to a hostname. Prefers the local hostname for the local id, then
 * any live ForeignHost's advertised hostname, then the name recorded for a known
 * failed host, and finally "UNKNOWN".
 */
public String getHostnameForHostID(int hostId) {
    if (hostId == m_localHostId) {
        return CoreUtils.getHostnameOrAddress();
    }
    Iterator<ForeignHost> it = m_foreignHosts.get(hostId).iterator();
    if (it.hasNext()) {
        return it.next().hostname();
    }
    // Fixed: the original read m_knownFailedHosts.get(hostId) twice — a redundant
    // lookup and a window for the entry to vanish between the null check and the
    // read. A single read is both cheaper and race-free.
    String failedHostName = m_knownFailedHosts.get(hostId);
    return failedHostName != null ? failedHostName : "UNKNOWN";
}
Given a host id , return the hostname for it .
32,006
/**
 * Discard the mailbox registered under the given HSId. The immutable mailbox map is
 * rebuilt without that entry under m_mapLock (copy-on-write update).
 */
public void removeMailbox(long hsId) {
    synchronized (m_mapLock) {
        ImmutableMap.Builder<Long, Mailbox> survivors = ImmutableMap.builder();
        for (Map.Entry<Long, Mailbox> entry : m_siteMailboxes.entrySet()) {
            if (!entry.getKey().equals(hsId)) {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        m_siteMailboxes = survivors.build();
    }
}
Discard a mailbox
32,007
/**
 * Block until the number of ready hosts equals expectedHosts. Announces this host's
 * readiness via an ephemeral-sequential ZK node, then watches the ready-hosts
 * directory until the count is reached. Any ZK/interrupt error crashes the process.
 */
public void waitForAllHostsToBeReady(int expectedHosts) {
    try {
        m_zk.create(CoreZK.readyhosts_host, null, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
        for (;;) {
            ZKUtil.FutureWatcher watcher = new ZKUtil.FutureWatcher();
            int readyCount = m_zk.getChildren(CoreZK.readyhosts, watcher).size();
            if (readyCount == expectedHosts) {
                return;
            }
            watcher.get();
        }
    } catch (KeeperException | InterruptedException e) {
        org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e);
    }
}
Block on this call until the number of ready hosts is equal to the number of expected hosts .
32,008
/**
 * Elastic-join variant of the ready barrier: registers this host id as a PERSISTENT
 * node under readyjoininghosts, then blocks until expectedHosts joining hosts have
 * registered. Any ZK/interrupt error crashes the process.
 */
public void waitForJoiningHostsToBeReady(int expectedHosts, int localHostId) {
    try {
        m_zk.create(ZKUtil.joinZKPath(CoreZK.readyjoininghosts, Integer.toString(localHostId)),
                null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        for (;;) {
            ZKUtil.FutureWatcher watcher = new ZKUtil.FutureWatcher();
            int readyCount = m_zk.getChildren(CoreZK.readyjoininghosts, watcher).size();
            if (readyCount == expectedHosts) {
                return;
            }
            watcher.get();
        }
    } catch (KeeperException | InterruptedException e) {
        org.voltdb.VoltDB.crashLocalVoltDB("Error waiting for hosts to be ready", false, e);
    }
}
For elastic join . Block on this call until the number of ready hosts is equal to the number of expected joining hosts .
32,009
/** @return the number of foreign hosts currently up. Used for test purposes. */
public int countForeignHosts() {
    int upCount = 0;
    for (ForeignHost candidate : m_foreignHosts.values()) {
        boolean alive = (candidate != null) && candidate.isUp();
        if (alive) {
            upCount++;
        }
    }
    return upCount;
}
Get the number of up foreign hosts . Used for test purposes .
32,010
/**
 * Kill every live socket to the given foreign host, then report the host failed.
 * Useful for fault-injection testing.
 */
public void closeForeignHostSocket(int hostId) {
    for (ForeignHost target : m_foreignHosts.get(hostId)) {
        if (target.isUp()) {
            target.killSocket();
        }
    }
    reportForeignHostFailed(hostId);
}
Kill a foreign host socket by id .
32,011
/**
 * Cut the network connection between two host ids immediately, from whichever side
 * this process happens to be. Useful for simulating network partitions.
 */
public void cutLink(int hostIdA, int hostIdB) {
    // The original duplicated the iteration loop verbatim for each direction;
    // extracted into a single private helper.
    if (m_localHostId == hostIdA) {
        cutLinksTo(hostIdB);
    }
    if (m_localHostId == hostIdB) {
        cutLinksTo(hostIdA);
    }
}

/** Cut every ForeignHost connection registered for the given remote host id. */
private void cutLinksTo(int remoteHostId) {
    for (ForeignHost fh : m_foreignHosts.get(remoteHostId)) {
        fh.cutLink();
    }
}
Cut the network connection between two hostids immediately Useful for simulating network partitions
32,012
void execute ( ) { String sCmd = null ; if ( 4096 <= ifHuge . length ( ) ) { sCmd = ifHuge ; } else { sCmd = txtCommand . getText ( ) ; } if ( sCmd . startsWith ( " ) ) { testPerformance ( ) ; return ; } String [ ] g = new String [ 1 ] ; lTime = System . currentTimeMillis ( ) ; try { if ( sStatement == null ) { return ; } sStatement . execute ( sCmd ) ; lTime = System . currentTimeMillis ( ) - lTime ; int r = sStatement . getUpdateCount ( ) ; if ( r == - 1 ) { formatResultSet ( sStatement . getResultSet ( ) ) ; } else { g [ 0 ] = "update count" ; gResult . setHead ( g ) ; g [ 0 ] = String . valueOf ( r ) ; gResult . addRow ( g ) ; } addToRecent ( txtCommand . getText ( ) ) ; } catch ( SQLException e ) { lTime = System . currentTimeMillis ( ) - lTime ; g [ 0 ] = "SQL Error" ; gResult . setHead ( g ) ; String s = e . getMessage ( ) ; s += " / Error Code: " + e . getErrorCode ( ) ; s += " / State: " + e . getSQLState ( ) ; g [ 0 ] = s ; gResult . addRow ( g ) ; } updateResult ( ) ; System . gc ( ) ; }
Adjust this method for large strings , i.e. multiple megabytes .
32,013
/**
 * Stream the entire ResultSet to the CSV output, one writeNext() per row, optionally
 * preceded by a header row of column names.
 * @param rs the (already-executed) result set to dump; consumed by this call
 * @param includeColumnNames whether to emit the header row first
 */
public void writeAll(java.sql.ResultSet rs, boolean includeColumnNames)
        throws SQLException, IOException {
    if (includeColumnNames) {
        writeColumnNames(rs);
    }
    while (rs.next()) {
        writeNext(resultService.getColumnValues(rs));
    }
}
Writes the entire ResultSet to a CSV file .
32,014
/**
 * Append one "length key=value\n" record to a tar PAX extended header. The leading
 * length field counts the ENTIRE record including itself, so the number of digits in
 * the length depends on the length — the threshold ladder below adds 1..5 digits
 * depending on where (key + value + 3 separator chars) falls; records needing more
 * than 5 digits are rejected. Empty/null keys or values are malformed per the spec.
 * NOTE(review): lengths are computed in Java chars, not encoded bytes — for
 * non-ASCII keys/values the PAX length field would be wrong (see the author's own
 * doubt in the dataset description); confirm against the POSIX pax spec.
 * @throws TarMalformatException on empty input or a record too large to encode
 * @throws IOException on write failure
 */
public void addRecord ( String key , String value ) throws TarMalformatException , IOException { if ( key == null || value == null || key . length ( ) < 1 || value . length ( ) < 1 ) { throw new TarMalformatException ( RB . singleton . getString ( RB . ZERO_WRITE ) ) ; } int lenWithoutIlen = key . length ( ) + value . length ( ) + 3 ; int lenW = 0 ; if ( lenWithoutIlen < 8 ) { lenW = lenWithoutIlen + 1 ; } else if ( lenWithoutIlen < 97 ) { lenW = lenWithoutIlen + 2 ; } else if ( lenWithoutIlen < 996 ) { lenW = lenWithoutIlen + 3 ; } else if ( lenWithoutIlen < 9995 ) { lenW = lenWithoutIlen + 4 ; } else if ( lenWithoutIlen < 99994 ) { lenW = lenWithoutIlen + 5 ; } else { throw new TarMalformatException ( RB . singleton . getString ( RB . PIF_TOOBIG , 99991 ) ) ; } writer . write ( Integer . toString ( lenW ) ) ; writer . write ( ' ' ) ; writer . write ( key ) ; writer . write ( '=' ) ; writer . write ( value ) ; writer . write ( '\n' ) ; writer . flush ( ) ; }
I guess the initial length field is supposed to be in units of characters not bytes?
32,015
/**
 * Silence the babysitter: sets the shutdown flag so watches stop being reset, and —
 * if this instance owns a private executor — shuts it down and waits (effectively
 * forever) for termination.
 */
synchronized public void shutdown() {
    m_shutdown.set(true);
    if (m_isExecutorServiceLocal) {
        try {
            m_es.shutdown();
            m_es.awaitTermination(365, TimeUnit.DAYS);
        } catch (InterruptedException e) {
            // FIX: re-assert the interrupt flag instead of swallowing it, so callers
            // up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            repairLog.warn("Unexpected interrupted exception", e);
        }
    }
}
shutdown silences the babysitter and causes watches to not reset . Note that shutting down will churn ephemeral ZK nodes - shutdown allows the programmer to not set watches on nodes from terminated session .
32,016
/**
 * Create a BabySitter with its own private single-thread executor and block on
 * reading the initial children list. The sitter is marked as owning the executor so
 * shutdown() tears it down.
 */
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb)
        throws InterruptedException, ExecutionException {
    ExecutorService privateEs = CoreUtils.getCachedSingleThreadExecutor("Babysitter-" + dir, 15000);
    Pair<BabySitter, List<String>> result = blockingFactory(zk, dir, cb, privateEs);
    result.getFirst().m_isExecutorServiceLocal = true;
    return result;
}
Create a new BabySitter and block on reading the initial children list .
32,017
/**
 * Create a BabySitter on a caller-supplied executor and block on reading the initial
 * children list. The initial read runs on the CURRENT thread, not the executor — the
 * caller is assumed to already be on that executor.
 */
public static Pair<BabySitter, List<String>> blockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    BabySitter sitter = new BabySitter(zk, dir, cb, es);
    List<String> firstChildren;
    try {
        firstChildren = sitter.m_eventHandler.call();
    } catch (Exception e) {
        throw new ExecutionException(e);
    }
    return new Pair<BabySitter, List<String>>(sitter, firstChildren);
}
Create a new BabySitter and block on reading the initial children list . Use the provided ExecutorService to queue events to rather than creating a private ExecutorService . The initial set of children will be retrieved in the current thread and not the ExecutorService because it is assumed this is being called from the ExecutorService
32,018
/**
 * Create a BabySitter on a caller-supplied executor; the initial children read is
 * queued onto the executor rather than performed inline, so this returns immediately.
 */
public static BabySitter nonblockingFactory(ZooKeeper zk, String dir, Callback cb, ExecutorService es)
        throws InterruptedException, ExecutionException {
    BabySitter sitter = new BabySitter(zk, dir, cb, es);
    sitter.m_es.submit(sitter.m_eventHandler);
    return sitter;
}
Create a new BabySitter and make sure it reads the initial children list . Use the provided ExecutorService to queue events to rather than creating a private ExecutorService .
32,019
/**
 * Internal guard for closed statements: throws if this statement is closed, and if
 * the parent connection has died, closes this statement too before throwing.
 */
void checkClosed() throws SQLException {
    if (isClosed) {
        throw Util.sqlException(ErrorCode.X_07501);
    }
    if (connection.isClosed) {
        // Connection gone underneath us — close eagerly, then report.
        close();
        throw Util.sqlException(ErrorCode.X_08503);
    }
}
An internal check for closed statements .
32,020
/**
 * Post-execute bookkeeping: walks (and unlinks) the chain of secondary results,
 * folding WARNING results into the rootWarning chain, remembering the last ERROR
 * result, and capturing a DATA result as the generated-keys result. Finally, if the
 * primary result is data, wraps it in the current JDBCResultSet.
 * NOTE(review): getUnlinkChainedResult() both advances and detaches — the loop
 * intentionally destroys the chain as it reads it.
 * @throws SQLException declared for callers; this body itself does not throw it
 */
void performPostExecute ( ) throws SQLException { resultOut . clearLobResults ( ) ; generatedResult = null ; if ( resultIn == null ) { return ; } Result current = resultIn ; while ( current . getChainedResult ( ) != null ) { current = current . getUnlinkChainedResult ( ) ; if ( current . getType ( ) == ResultConstants . WARNING ) { SQLWarning w = Util . sqlWarning ( current ) ; if ( rootWarning == null ) { rootWarning = w ; } else { rootWarning . setNextWarning ( w ) ; } } else if ( current . getType ( ) == ResultConstants . ERROR ) { errorResult = current ; } else if ( current . getType ( ) == ResultConstants . DATA ) { generatedResult = current ; } } if ( resultIn . isData ( ) ) { currentResultSet = new JDBCResultSet ( connection . sessionProxy , this , resultIn , resultIn . metaData , connection . connProperties ) ; } }
processes chained warnings and any generated columns result set
32,021
/**
 * Advance to the next ResultSet. Only a single data result is supported: the first
 * call reports it as available, any later call closes the current result set (unless
 * KEEP_CURRENT_RESULT was requested), discards the input result, and returns false.
 * (Known limitation per the original author: not yet correct for multiple
 * ResultSets.)
 */
boolean getMoreResults(int current) throws SQLException {
    checkClosed();
    if (resultIn == null || !resultIn.isData()) {
        return false;
    }
    if (resultSetCounter == 0) {
        // First call: the one data result counts as "the next result".
        resultSetCounter++;
        return true;
    }
    boolean shouldCloseCurrent = (currentResultSet != null) && (current != KEEP_CURRENT_RESULT);
    if (shouldCloseCurrent) {
        currentResultSet.close();
    }
    resultIn = null;
    return false;
}
Note yet correct for multiple ResultSets . Should keep track of the previous ResultSet objects to be able to close them
32,022
/**
 * Close and drop all result-related state: the current result set, the generated-keys
 * result set, and the raw input result. See getMoreResults for the single-result
 * limitation this compensates for.
 */
void closeResultData() throws SQLException {
    if (currentResultSet != null) {
        currentResultSet.close();
    }
    if (generatedResultSet != null) {
        generatedResultSet.close();
    }
    generatedResultSet = null;
    generatedResult = null;
    resultIn = null;
}
See comment for getMoreResults .
32,023
/**
 * Check whether this view could apply to the provided table unchanged: the table must
 * be the view's source table (and not the view itself), the group-by column must not
 * be DECIMAL, and the sum column must be an integral type (TINYINT/SMALLINT/INTEGER).
 * Missing columns make the view incompatible rather than throwing.
 */
public boolean compatibleWithTable(VoltTable table) {
    String candidateName = getTableName(table);
    // A view never applies to itself.
    if (candidateName.equals(viewName)) {
        return false;
    }
    // Idiom fix: was `candidateName.equals(srcTableName) == false`.
    if (!candidateName.equals(srcTableName)) {
        return false;
    }
    try {
        int groupColIndex = table.getColumnIndex(groupColName);
        VoltType groupColType = table.getColumnType(groupColIndex);
        if (groupColType == VoltType.DECIMAL) {
            return false;
        }
        int sumColIndex = table.getColumnIndex(sumColName);
        VoltType sumColType = table.getColumnType(sumColIndex);
        // Collapsed if/else-return-boolean into a single expression.
        return (sumColType == VoltType.TINYINT)
                || (sumColType == VoltType.SMALLINT)
                || (sumColType == VoltType.INTEGER);
    } catch (IllegalArgumentException e) {
        // getColumnIndex throws when a required column is absent.
        return false;
    }
}
Check if the view could apply to the provided table unchanged .
32,024
/**
 * Returns a default thread factory: the JDK default off App Engine, otherwise App
 * Engine's request-scoped factory obtained reflectively (the class is not on the
 * compile-time classpath).
 */
public static ThreadFactory platformThreadFactory() {
    if (!isAppEngine()) {
        return Executors.defaultThreadFactory();
    }
    try {
        return (ThreadFactory) Class.forName("com.google_voltpatches.appengine.api.ThreadManager")
                .getMethod("currentRequestThreadFactory")
                .invoke(null);
    // Collapsed three byte-identical catch blocks into one multi-catch (Java 7+,
    // already used elsewhere in this codebase).
    } catch (IllegalAccessException | ClassNotFoundException | NoSuchMethodException e) {
        throw new RuntimeException("Couldn't invoke ThreadManager.currentRequestThreadFactory", e);
    } catch (InvocationTargetException e) {
        // Unwrap the reflective wrapper and rethrow the real cause.
        throw Throwables.propagate(e.getCause());
    }
}
Returns a default thread factory used to create new threads .
32,025
/**
 * Verify snapshots found in the given directories, printing a report per snapshot.
 * When snapshotNames is non-empty only those snapshots are considered; an empty scan
 * is reported as corruption.
 */
public static void verifySnapshots(final List<String> directories, final Set<String> snapshotNames) {
    FileFilter filter = snapshotNames.isEmpty()
            ? new SnapshotFilter()
            : new SpecificSnapshotFilter(snapshotNames);
    Map<String, Snapshot> found = new HashMap<String, Snapshot>();
    for (String directory : directories) {
        SnapshotUtil.retrieveSnapshotFiles(new File(directory), found, filter, true,
                SnapshotPathType.SNAP_PATH, CONSOLE_LOG);
    }
    if (found.isEmpty()) {
        System.out.println("Snapshot corrupted");
        System.out.println("No files found");
    }
    for (Snapshot snapshot : found.values()) {
        System.out.println(SnapshotUtil.generateSnapshotReport(snapshot.getTxnId(), snapshot).getSecond());
    }
}
Perform snapshot verification .
32,026
/**
 * Get the lowest value that is equivalent to the given value within the histogram's
 * resolution — i.e. the base of the bucket/sub-bucket the value falls into.
 */
public long lowestEquivalentValue(final long value) {
    final int bucket = getBucketIndex(value);
    final int subBucket = getSubBucketIndex(value, bucket);
    return valueFromIndex(bucket, subBucket);
}
Get the lowest value that is equivalent to the given value within the histogram's resolution . Here equivalent means that value samples recorded for any two equivalent values are counted in a common total count .
32,027
/**
 * Compute the mean of all recorded values: each recorded value contributes its
 * median-equivalent value weighted by its count. Returns 0.0 for an empty histogram.
 */
public double getMean() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    recordedValuesIterator.reset();
    double weightedSum = 0;
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue sample = recordedValuesIterator.next();
        weightedSum += medianEquivalentValue(sample.getValueIteratedTo())
                * sample.getCountAtValueIteratedTo();
    }
    return (weightedSum * 1.0) / getTotalCount();
}
Get the computed mean value of all recorded values in the histogram
32,028
/**
 * Compute the (population) standard deviation of all recorded values around getMean(),
 * using median-equivalent values weighted by count. Returns 0.0 for an empty
 * histogram.
 */
public double getStdDeviation() {
    if (getTotalCount() == 0) {
        return 0.0;
    }
    final double mean = getMean();
    double geometric_deviation_total = 0.0;
    recordedValuesIterator.reset();
    while (recordedValuesIterator.hasNext()) {
        HistogramIterationValue iterationValue = recordedValuesIterator.next();
        // FIX: was a boxed `Double` local — primitive double avoids pointless
        // autobox/unbox on every iteration of a hot loop.
        double deviation = (medianEquivalentValue(iterationValue.getValueIteratedTo()) * 1.0) - mean;
        geometric_deviation_total += (deviation * deviation) * iterationValue.getCountAddedInThisIterationStep();
    }
    return Math.sqrt(geometric_deviation_total / getTotalCount());
}
Get the computed standard deviation of all recorded values in the histogram
32,029
/**
 * Recompute totalCount from scratch by summing the count in every bucket slot, and
 * store the result. Used to repair the cached total after direct count mutation.
 */
public void reestablishTotalCount() {
    long recomputedTotal = 0;
    for (int slot = 0; slot < countsArrayLength; slot++) {
        recomputedTotal += getCountAtIndex(slot);
    }
    setTotalCount(recomputedTotal);
}
Reestablish the internal notion of totalCount by recalculating it from recorded values .
32,030
/**
 * Wire a table-subquery's Table object to its QueryExpression: copy the column list
 * and count, create the primary key, and — when requested — build a full index over
 * every column.
 */
static void setTableColumnsForSubquery(Table table, QueryExpression queryExpression, boolean fullIndex) {
    table.columnList = queryExpression.getColumns();
    table.columnCount = queryExpression.getColumnCount();
    table.createPrimaryKey();
    if (!fullIndex) {
        return;
    }
    int[] allColumns = table.getNewColumnMap();
    ArrayUtil.fillSequence(allColumns);
    table.fullIndex = table.createIndexForColumns(allColumns);
}
For table subqueries
32,031
/**
 * Consider one candidate plan: make scans deterministic, compute its cost from the
 * recursively-estimated statistics, keep it as the best plan so far if it is cheaper
 * (or the first), and emit the plan under a fresh sequential filename either way.
 */
public void considerCandidatePlan(CompiledPlan plan, AbstractParsedStmt parsedStmt) {
    ScanDeterminizer.apply(plan, m_detMode);
    plan.sql = m_sql;
    m_stats = new PlanStatistics();
    AbstractPlanNode root = plan.rootPlanGraph;
    root.computeEstimatesRecursively(m_stats, m_estimates, m_paramHints);
    plan.cost = m_costModel.getPlanCost(m_stats);
    String planFileName = String.valueOf(m_planId++);
    boolean isNewBest = (m_bestPlan == null) || (plan.cost < m_bestPlan.cost);
    if (isNewBest) {
        m_bestPlan = plan;
        m_bestFilename = planFileName;
    }
    outputPlan(plan, root, planFileName);
}
Picks the best cost plan for a given raw plan
32,032
/**
 * Create and register the PartitionDRGateway for a partition: loads the pro-version
 * subclass when a producer gateway is supplied (falling back to the no-op base class),
 * initializes it (crashing the process on failure), and installs it into the
 * immutable partition map via copy-on-write.
 */
public static PartitionDRGateway getInstance(int partitionId, ProducerDRGateway producerGateway, StartAction startAction) {
    PartitionDRGateway gateway = (producerGateway != null) ? tryToLoadProVersion() : null;
    if (gateway == null) {
        gateway = new PartitionDRGateway();
    }
    try {
        gateway.init(partitionId, producerGateway, startAction);
    } catch (Exception e) {
        VoltDB.crashLocalVoltDB(e.getMessage(), true, e);
    }
    assert !m_partitionDRGateways.containsKey(partitionId);
    // Copy-on-write replacement of the immutable gateway map.
    ImmutableMap.Builder<Integer, PartitionDRGateway> rebuilt = ImmutableMap.builder();
    rebuilt.putAll(m_partitionDRGateways);
    rebuilt.put(partitionId, gateway);
    m_partitionDRGateways = rebuilt.build();
    return gateway;
}
Load the full pro-version subclass when one is available and applicable ; otherwise load the no-op stub .
32,033
/**
 * Get a user's permission list (display-oriented; not intended for permission
 * checks). With auth disabled returns the default list; an unknown or null user
 * yields an empty array.
 */
public String[] getUserPermissionList(String userName) {
    if (!m_enabled) {
        return m_perm_list;
    }
    if (userName == null) {
        return new String[] {};
    }
    AuthUser user = getUser(userName);
    return (user == null) ? new String[] {} : user.m_permissions_list;
}
Get a user's permission list . Not good for permission checking .
32,034
/**
 * Invoke a stored procedure on behalf of an internal caller (e.g. an NT procedure
 * runner): builds the invocation, round-trips it through command-log serialization,
 * and submits it via createTransaction. Serialization failure is reported to the
 * callback as an UNEXPECTED_FAILURE response instead of throwing.
 */
public void callProcedure(AuthUser user, boolean isAdmin, int timeout, ProcedureCallback cb, String procName, Object[] args) {
    assert (cb != null);
    StoredProcedureInvocation task = new StoredProcedureInvocation();
    task.setProcName(procName);
    task.setParams(args);
    if (timeout != BatchTimeoutOverrideType.NO_TIMEOUT) {
        task.setBatchTimeout(timeout);
    }
    InternalAdapterTaskAttributes kattrs = new InternalAdapterTaskAttributes(DEFAULT_INTERNAL_ADAPTER_NAME, isAdmin, connectionId());
    assert (m_dispatcher != null);
    try {
        task = MiscUtils.roundTripForCL(task);
    } catch (Exception e) {
        String msg = String.format("Cannot invoke procedure %s. failed to create task: %s", procName, e.getMessage());
        m_logger.rateLimitedLog(SUPPRESS_INTERVAL, Level.ERROR, null, msg);
        ClientResponseImpl cri = new ClientResponseImpl(ClientResponse.UNEXPECTED_FAILURE, new VoltTable[0], msg);
        try {
            cb.clientCallback(cri);
        } catch (Exception e1) {
            throw new IllegalStateException(e1);
        }
        // BUGFIX: must stop here. The original fell through and still called
        // createTransaction with the un-serialized task after already delivering a
        // failure response — answering the caller twice.
        return;
    }
    createTransaction(kattrs, cb, task, user);
}
Used to call a procedure from NTProcedureRunner . Calls createTransaction with the proper params .
32,035
/**
 * Trace helper: when tracing is enabled, prints message1 immediately followed by
 * message2 and a newline on System.out; otherwise a no-op.
 */
public static void printSystemOut(String message1, long message2) {
    if (!TRACESYSTEMOUT) {
        return;
    }
    System.out.print(message1);
    System.out.println(message2);
}
Used to print messages to System . out
32,036
/**
 * Types that accept a precision parameter in column definitions or casts. (Per the
 * original note, CHAR/VARCHAR/VARCHAR_IGNORECASE params are ignored when strict-type
 * enforcement is off.)
 */
public static boolean acceptsPrecision(int type) {
    switch (type) {
        // character and binary string types
        case Types.SQL_BINARY :
        case Types.SQL_BIT :
        case Types.SQL_BIT_VARYING :
        case Types.SQL_BLOB :
        case Types.SQL_CHAR :
        case Types.SQL_NCHAR :
        case Types.SQL_CLOB :
        case Types.NCLOB :
        case Types.SQL_VARBINARY :
        case Types.SQL_VARCHAR :
        case Types.SQL_NVARCHAR :
        case Types.VARCHAR_IGNORECASE :
        // exact and approximate numerics
        case Types.SQL_DECIMAL :
        case Types.SQL_NUMERIC :
        case Types.SQL_FLOAT :
        // datetime types with fractional-second precision
        case Types.SQL_TIME :
        case Types.SQL_TIMESTAMP :
        // interval types
        case Types.SQL_INTERVAL_YEAR :
        case Types.SQL_INTERVAL_YEAR_TO_MONTH :
        case Types.SQL_INTERVAL_MONTH :
        case Types.SQL_INTERVAL_DAY :
        case Types.SQL_INTERVAL_DAY_TO_HOUR :
        case Types.SQL_INTERVAL_DAY_TO_MINUTE :
        case Types.SQL_INTERVAL_DAY_TO_SECOND :
        case Types.SQL_INTERVAL_HOUR :
        case Types.SQL_INTERVAL_HOUR_TO_MINUTE :
        case Types.SQL_INTERVAL_HOUR_TO_SECOND :
        case Types.SQL_INTERVAL_MINUTE :
        case Types.SQL_INTERVAL_MINUTE_TO_SECOND :
        case Types.SQL_INTERVAL_SECOND :
        // geospatial
        case Types.VOLT_GEOGRAPHY :
            return true;
        default :
            return false;
    }
}
Types that accept precision params in column definition or casts . CHAR VARCHAR and VARCHAR_IGNORECASE params are ignored when the sql . enforce_strict_types is false .
32,037
/**
 * Returns an iterable whose iterators cycle indefinitely over the provided elements.
 * The varargs array is only copied into a list, never stored or exposed, so the
 * generic-varargs call is heap-pollution safe — hence {@code @SafeVarargs} (FIX:
 * suppresses the unchecked-generic-array warning at call sites).
 */
@SafeVarargs
public static <T> Iterable<T> cycle(T... elements) {
    return cycle(Lists.newArrayList(elements));
}
Returns an iterable whose iterators cycle indefinitely over the provided elements .
32,038
/**
 * Load a serialized application catalog into the native engine, initializing catalog
 * version 0. Any native error code is converted to an EEException by checkErrorCode.
 */
protected void coreLoadCatalog(long timestamp, final byte[] catalogBytes) throws EEException {
    LOG.trace("Loading Application Catalog...");
    final int status = nativeLoadCatalog(pointer, timestamp, catalogBytes);
    checkErrorCode(status);
}
Provide a serialized catalog and initialize version 0 of the engine s catalog .
32,039
/**
 * Apply a catalog diff to the native engine's catalog at the given timestamp. Any
 * native error code is converted to an EEException by checkErrorCode.
 */
public void coreUpdateCatalog(long timestamp, boolean isStreamUpdate, final String catalogDiffs) throws EEException {
    LOG.trace("Loading Application Catalog...");
    final int status = nativeUpdateCatalog(pointer, timestamp, isStreamUpdate, getStringBytes(catalogDiffs));
    checkErrorCode(status);
}
Provide a catalog diff and a new catalog version and update the engine s catalog .
32,040
/**
 * Extract the per-fragment execution stats from the native stats buffer. Buffer
 * layout as consumed here: one leading byte (read and discarded — NOTE(review):
 * presumably a flag/version byte, confirm against the native writer), then an int
 * count of fragments that succeeded, then one long execution time per succeeded
 * fragment. When fewer fragments succeeded than the output array holds, one extra
 * long (presumably the failed fragment's time — confirm) is written at the next slot.
 * @param batchSize unused here; kept for the caller-facing signature
 * @param executionTimesOut optional output array for per-fragment times; may be null
 * @return the number of fragments that executed successfully
 */
public int extractPerFragmentStats ( int batchSize , long [ ] executionTimesOut ) { m_perFragmentStatsBuffer . clear ( ) ; m_perFragmentStatsBuffer . get ( ) ; int succeededFragmentsCount = m_perFragmentStatsBuffer . getInt ( ) ; if ( executionTimesOut != null ) { assert ( executionTimesOut . length >= succeededFragmentsCount ) ; for ( int i = 0 ; i < succeededFragmentsCount ; i ++ ) { executionTimesOut [ i ] = m_perFragmentStatsBuffer . getLong ( ) ; } if ( succeededFragmentsCount < executionTimesOut . length ) { executionTimesOut [ succeededFragmentsCount ] = m_perFragmentStatsBuffer . getLong ( ) ; } } return succeededFragmentsCount ; }
Extract the per - fragment stats from the buffer .
32,041
/**
 * Retrieve a set of statistics tables from the native engine for the given selector.
 * The native call reports how many tables follow in the deserializer buffer; each is
 * a length-prefixed serialized VoltTable. Returns null-free array of numResults
 * tables; a native failure (-1) raises via throwExceptionForError.
 */
public VoltTable[] getStats(final StatsSelector selector, final int locators[], final boolean interval, final Long now) {
    m_nextDeserializer.clear();
    final int numResults = nativeGetStats(pointer, selector.ordinal(), locators, interval, now);
    if (numResults == -1) {
        throwExceptionForError(ERRORCODE_ERROR);
    }
    try {
        // Leading int is consumed and discarded (not used by this reader).
        m_nextDeserializer.readInt();
        final VoltTable[] tables = new VoltTable[numResults];
        for (int i = 0; i < numResults; i++) {
            final int byteCount = m_nextDeserializer.readInt();
            final byte[] tableBytes = new byte[byteCount];
            m_nextDeserializer.readFully(tableBytes, 0, byteCount);
            tables[i] = PrivateVoltTableFactory.createVoltTableFromBuffer(ByteBuffer.wrap(tableBytes), false);
        }
        return tables;
    } catch (final IOException ex) {
        LOG.error("Failed to deserialze result table for getStats" + ex);
        throw new EEException(ERRORCODE_WRONG_SERIALIZED_BYTES);
    }
}
Retrieve a set of statistics using the specified selector from the StatisticsSelector enum .
32,042
/** Persist a large temp table block to disk synchronously; returns the task's success flag. */
public boolean storeLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getStoreTask(blockId, block));
}
Store a large temp table block to disk .
32,043
/**
 * Synchronously read a large temp table block from disk into the given buffer. The
 * on-disk copy remains after the load completes.
 */
public boolean loadLargeTempTableBlock(long siteId, long blockCounter, ByteBuffer block) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getLoadTask(blockId, block));
}
Read a large table block from disk and write it to a ByteBuffer . Block will still be stored on disk when this operation completes .
32,044
/** Synchronously delete the identified large temp table block from disk. */
public boolean releaseLargeTempTableBlock(long siteId, long blockCounter) {
    final BlockId blockId = new BlockId(siteId, blockCounter);
    return executeLargeBlockTaskSynchronously(LargeBlockTask.getReleaseTask(blockId));
}
Delete the block with the given id from disk .
32,045
/** @return the SQL text of every planned statement in the batch, decoded as UTF-8. */
public List<String> getSQLStatements() {
    List<String> statementTexts = new ArrayList<>(plannedStatements.size());
    for (AdHocPlannedStatement planned : plannedStatements) {
        statementTexts.add(new String(planned.sql, Constants.UTF8ENCODING));
    }
    return statementTexts;
}
Retrieve all the SQL statement text as a list of strings .
32,046
/**
 * A batch is single-partition compatible when no statement in it carries a collector
 * (multi-partition) fragment.
 */
public boolean isSinglePartitionCompatible() {
    for (AdHocPlannedStatement planned : plannedStatements) {
        boolean isMultiPartition = (planned.core.collectorFragment != null);
        if (isMultiPartition) {
            return false;
        }
    }
    return true;
}
Detect if batch is compatible with single partition optimizations
32,047
/**
 * Serialize this batch (user parameters plus all planned statements) into a
 * single ByteBuffer. Deserialization is piecemeal via the static methods
 * userParamsFromBuffer and planArrayFromBuffer.
 *
 * Layout: serialized user ParameterSet, then a short statement count, then
 * each AdHocPlannedStatement in order.
 *
 * @return a freshly allocated buffer containing the serialized batch
 * @throws IOException if ParameterSet/statement serialization fails
 * @throws VoltTypeException if the user-supplied argument count does not match
 *         the total number of parameters across all statements
 */
public ByteBuffer flattenPlanArrayToBuffer() throws IOException {
    int size = 0;
    ParameterSet userParamCache = null;
    if (userParamSet == null) {
        userParamCache = ParameterSet.emptyParameterSet();
    } else {
        // Coerce each user argument to the type the planner expects for it.
        // Parameters are consumed in statement order across the whole batch.
        Object[] typedUserParams = new Object[userParamSet.length];
        int ii = 0;
        for (AdHocPlannedStatement cs : plannedStatements) {
            for (VoltType paramType : cs.core.parameterTypes) {
                if (ii >= typedUserParams.length) {
                    String errorMsg =
                        "Too few actual arguments were passed for the parameters in the sql statement(s): (" +
                        typedUserParams.length + " vs. " + ii + ")";
                    throw new VoltTypeException(errorMsg);
                }
                typedUserParams[ii] =
                        ParameterConverter.tryToMakeCompatible(paramType.classFromType(),
                                                               userParamSet[ii]);
                ii++;
            }
        }
        if (ii < typedUserParams.length) {
            String errorMsg =
                "Too many actual arguments were passed for the parameters in the sql statement(s): (" +
                typedUserParams.length + " vs. " + ii + ")";
            throw new VoltTypeException(errorMsg);
        }
        userParamCache = ParameterSet.fromArrayNoCopy(typedUserParams);
    }
    // First pass: compute the exact serialized size.
    size += userParamCache.getSerializedSize();
    size += 2; // short: statement count
    for (AdHocPlannedStatement cs : plannedStatements) {
        size += cs.getSerializedSize();
    }
    // Second pass: write params, count, then each statement.
    ByteBuffer buf = ByteBuffer.allocate(size);
    userParamCache.flattenToBuffer(buf);
    buf.putShort((short) plannedStatements.size());
    for (AdHocPlannedStatement cs : plannedStatements) {
        cs.flattenToBuffer(buf);
    }
    return buf;
}
For convenience serialization is accomplished with this single method but deserialization is piecemeal via the static methods userParamsFromBuffer and planArrayFromBuffer with no dummy AdHocPlannedStmtBatch receiver instance required .
32,048
/**
 * Return the EXPLAIN output for the batched statement at the given index.
 *
 * @param i             index of the statement in this batch
 * @param db            catalog database used to resolve plan-node references
 * @param getJSONString when true return the raw JSON plan(s) instead of the
 *                      human-readable explain string
 * @return the explain text, or an error string on JSON failure
 */
public String explainStatement(int i, Database db, boolean getJSONString) {
    AdHocPlannedStatement plannedStatement = plannedStatements.get(i);
    String aggplan = new String(plannedStatement.core.aggregatorFragment, Constants.UTF8ENCODING);
    PlanNodeTree pnt = new PlanNodeTree();
    try {
        String result = null;
        JSONObject jobj = new JSONObject(aggplan);
        if (getJSONString) {
            result = jobj.toString(4);
        }
        pnt.loadFromJSONPlan(jobj, db);
        if (plannedStatement.core.collectorFragment != null) {
            // Multi-partition statement: reattach the collector (lower) fragment
            // under the aggregator fragment so the explain shows the whole plan.
            String collplan = new String(plannedStatement.core.collectorFragment, Constants.UTF8ENCODING);
            PlanNodeTree collpnt = new PlanNodeTree();
            JSONObject jobMP = new JSONObject(collplan);
            collpnt.loadFromJSONPlan(jobMP, db);
            assert (collpnt.getRootPlanNode() instanceof SendPlanNode);
            pnt.getRootPlanNode().reattachFragment(collpnt.getRootPlanNode());
            if (getJSONString) {
                result += "\n" + jobMP.toString(4);
            }
        }
        if (!getJSONString) {
            // Must run after the fragments are reattached above.
            result = pnt.getRootPlanNode().toExplainPlanString();
        }
        return result;
    } catch (JSONException e) {
        // NOTE(review): stdout print looks like leftover debugging; the error
        // string below is what callers actually see.
        System.out.println(e);
        return "Internal Error (JSONException): " + e.getMessage();
    }
}
Return the EXPLAIN string of the batched statement at the index
32,049
/**
 * Get the global compiler cache for the given catalog hash, creating and
 * registering a new one on first use. There is at most one cache per
 * catalog hash at a time.
 *
 * @param catalogHash hash bytes identifying the catalog version
 * @return the (possibly new) cache for that catalog
 */
public synchronized static AdHocCompilerCache getCacheForCatalogHash(byte[] catalogHash) {
    final String key = Encoder.hexEncode(catalogHash);
    AdHocCompilerCache cache = m_catalogHashMatch.getIfPresent(key);
    if (cache != null) {
        return cache;
    }
    cache = new AdHocCompilerCache();
    m_catalogHashMatch.put(key, cache);
    return cache;
}
Get the global cache for a given hash of the catalog . Note that there can be only one cache per catalogHash at a time .
32,050
/**
 * Stats printing method used during development; dumps hit rates and then
 * resets all counters. Probably shouldn't live past real stats integration.
 *
 * Fix: the hit-rate divisions produced "NaN%" when no queries had been
 * recorded yet; guard the zero-denominator case.
 */
synchronized void printStats() {
    // Report 0.0% rather than NaN when no queries have been seen.
    double literalHitRate = (m_literalQueries == 0)
            ? 0.0 : (m_literalHits * 100.0) / m_literalQueries;
    double planHitRate = (m_planQueries == 0)
            ? 0.0 : (m_planHits * 100.0) / m_planQueries;
    String line1 = String.format(
            "CACHE STATS - Literals: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n",
            m_literalHits, m_literalQueries, literalHitRate,
            m_literalInsertions, m_literalEvictions);
    String line2 = String.format(
            "CACHE STATS - Plans: Hits %d/%d (%.1f%%), Inserts %d Evictions %d\n",
            m_planHits, m_planQueries, planHitRate,
            m_planInsertions, m_planEvictions);
    System.out.print(line1 + line2);
    System.out.flush();
    // Reset all counters for the next reporting window.
    m_literalHits = 0;
    m_literalQueries = 0;
    m_literalInsertions = 0;
    m_literalEvictions = 0;
    m_planHits = 0;
    m_planQueries = 0;
    m_planInsertions = 0;
    m_planEvictions = 0;
}
Stats printing method used during development . Probably shouldn't live past real stats integration .
32,051
/**
 * Insert a freshly planned statement into the cache. Called from the
 * PlannerTool directly when it finishes planning; this is the only way to
 * populate the cache.
 *
 * Maintains two caches: a core (parameterized-plan) cache keyed by the parsed
 * token, and a literal cache keyed by the exact SQL text.
 *
 * @param sql the exact query text
 * @param parsedToken canonical token for the parameterized form of the query
 * @param planIn the plan produced for this query
 * @param extractedLiterals literals pulled out during auto-parameterization
 * @param hasUserQuestionMarkParameters true if the user supplied ?-params
 *        (such plans are not literal-cacheable)
 * @param hasAutoParameterizedException true if auto-parameterization failed
 *        (such plans are not core-cacheable)
 */
public synchronized void put(String sql,
                             String parsedToken,
                             AdHocPlannedStatement planIn,
                             String[] extractedLiterals,
                             boolean hasUserQuestionMarkParameters,
                             boolean hasAutoParameterizedException) {
    assert (sql != null);
    assert (parsedToken != null);
    assert (planIn != null);
    AdHocPlannedStatement plan = planIn;
    assert (new String(plan.sql, Constants.UTF8ENCODING).equals(sql));
    // A query cannot both carry user ?-params and have failed auto-parameterization.
    assert (!hasUserQuestionMarkParameters || !hasAutoParameterizedException);
    if (!hasAutoParameterizedException) {
        BoundPlan matched = null;
        BoundPlan unmatched = new BoundPlan(planIn.core, planIn.parameterBindings(extractedLiterals));
        List<BoundPlan> boundVariants = m_coreCache.get(parsedToken);
        if (boundVariants == null) {
            // First plan for this token: start a new variant list.
            boundVariants = new ArrayList<BoundPlan>();
            m_coreCache.put(parsedToken, boundVariants);
            ++m_planInsertions;
        } else {
            // Look for an equivalent existing bound plan to share its core.
            for (BoundPlan boundPlan : boundVariants) {
                if (boundPlan.equals(unmatched)) {
                    matched = boundPlan;
                    break;
                }
            }
            if (matched != null) {
                if (unmatched.m_core != matched.m_core) {
                    // Coalesce onto the already-cached core so equal plans
                    // share one core object (and its bound constants).
                    plan = new AdHocPlannedStatement(planIn, matched.m_core);
                    plan.setBoundConstants(matched.m_constants);
                }
            }
        }
        if (matched == null) {
            boundVariants.add(unmatched);
        }
    }
    if (!hasUserQuestionMarkParameters) {
        AdHocPlannedStatement cachedPlan = m_literalCache.get(sql);
        if (cachedPlan == null) {
            m_literalCache.put(sql, plan);
            ++m_literalInsertions;
        } else {
            // Re-planning the same literal SQL must yield an equal plan.
            assert (cachedPlan.equals(plan));
        }
    }
}
Called from the PlannerTool directly when it finishes planning . This is the only way to populate the cache .
32,052
/**
 * Start a timer that prints cache stats to the console every five seconds.
 * Used for development until we get better stats integration. Idempotent:
 * subsequent calls are no-ops once the timer exists.
 */
public void startPeriodicStatsPrinting() {
    if (m_statsTimer != null) {
        return;
    }
    m_statsTimer = new Timer();
    final TimerTask printTask = new TimerTask() {
        @Override
        public void run() {
            printStats();
        }
    };
    m_statsTimer.scheduleAtFixedRate(printTask, 5000, 5000);
}
Start a timer that prints cache stats to the console every 5s . Used for development until we get better stats integration .
32,053
/**
 * Retrieves the value of the designated JDBC DATE parameter as a
 * java.sql.Date using the given Calendar. VoltDB does not support
 * CallableStatement OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Date getDate(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC DATE parameter as a java . sql . Date object using the given Calendar object to construct the date .
32,054
/**
 * Retrieves the value of a named JDBC DATE parameter as a java.sql.Date
 * using the given Calendar. VoltDB does not support CallableStatement
 * OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Date getDate(String parameterName, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC DATE parameter as a java . sql . Date object using the given Calendar object to construct the date .
32,055
/**
 * Returns an object representing the value of OUT parameter parameterName,
 * using the given type map for custom mapping. VoltDB does not support
 * CallableStatement OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Object getObject(String parameterName, Map<String, Class<?>> map) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Returns an object representing the value of OUT parameter parameterName and uses map for the custom mapping of the parameter value .
32,056
/**
 * Retrieves the value of the designated JDBC TIME parameter as a
 * java.sql.Time using the given Calendar. VoltDB does not support
 * CallableStatement OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Time getTime(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC TIME parameter as a java . sql . Time object using the given Calendar object to construct the time .
32,057
/**
 * Retrieves the value of a named JDBC TIME parameter as a java.sql.Time
 * using the given Calendar. VoltDB does not support CallableStatement
 * OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Time getTime(String parameterName, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC TIME parameter as a java . sql . Time object using the given Calendar object to construct the time .
32,058
/**
 * Retrieves the value of the designated JDBC TIMESTAMP parameter as a
 * java.sql.Timestamp using the given Calendar. VoltDB does not support
 * CallableStatement OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of the designated JDBC TIMESTAMP parameter as a java . sql . Timestamp object using the given Calendar object to construct the Timestamp object .
32,059
/**
 * Retrieves the value of a named JDBC TIMESTAMP parameter as a
 * java.sql.Timestamp using the given Calendar. VoltDB does not support
 * CallableStatement OUT parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Retrieves the value of a JDBC TIMESTAMP parameter as a java . sql . Timestamp object using the given Calendar object to construct the Timestamp object .
32,060
/**
 * Registers the named parameter as an OUT parameter of the given JDBC type
 * and scale. VoltDB does not support CallableStatement OUT parameters,
 * so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public void registerOutParameter(String parameterName, int sqlType, int scale) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Registers the parameter named parameterName to be of JDBC type sqlType .
32,061
/**
 * Sets the designated named parameter to the given String as an NCHAR/NVARCHAR
 * value. VoltDB does not support named CallableStatement parameters, so this
 * always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public void setNString(String parameterName, String value) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Sets the designated parameter to the given String object .
32,062
/**
 * Sets the designated named parameter to the given java.net.URL. VoltDB does
 * not support named CallableStatement parameters, so this always throws.
 *
 * @throws SQLException always (feature not supported), or if the statement is closed
 */
public void setURL(String parameterName, URL val) throws SQLException {
    checkClosed(); // closed-statement error takes precedence
    throw SQLError.noSupport();
}
Sets the designated parameter to the given java . net . URL object .
32,063
/**
 * Generate an HSID string tagged with the migrate-partition-leader suffix,
 * prefixed with Long.MAX_VALUE as a sentinel. When this string is later read
 * back, the suffix tells observers why the HSID was changed.
 *
 * @param HSId the HSID of the requested new leader
 * @return "&lt;Long.MAX_VALUE&gt;/&lt;HSId&gt;&lt;suffix&gt;"
 */
public static String suffixHSIdsWithMigratePartitionLeaderRequest(Long HSId) {
    return new StringBuilder()
            .append(Long.MAX_VALUE)
            .append('/')
            .append(HSId)
            .append(migrate_partition_leader_suffix)
            .toString();
}
Generate an HSID string with migrate - partition - leader suffix information . When this string is updated we can tell the reason why the HSID was changed .
32,064
/**
 * Initialize and start watching the partition-level cache. Blocking: the
 * watch-setup work runs on the executor and this method waits for it via
 * Future.get(), surfacing any setup failure to the caller.
 *
 * @throws InterruptedException if interrupted while waiting for setup
 * @throws ExecutionException if the watch-setup task fails
 */
public void startPartitionWatch() throws InterruptedException, ExecutionException {
    Future<?> task = m_es.submit(new PartitionWatchEvent());
    task.get(); // block until the watch is in place (or setup failed)
}
Initialized and start watching partition level cache this function is blocking .
32,065
/**
 * Race to create the partition-specific ZK node and put a data watch on it.
 * If another host wins the create race (NodeExistsException), that is fine:
 * we still install our watch on the existing node.
 *
 * @throws KeeperException on any ZK error other than the node already existing
 * @throws InterruptedException if the ZK operation is interrupted
 */
private void processPartitionWatchEvent() throws KeeperException, InterruptedException {
    try {
        m_zk.create(m_rootNode, null, Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        m_zk.getData(m_rootNode, m_childWatch, null);
    } catch (KeeperException.NodeExistsException e) {
        // Lost the create race; just attach the watch to the existing node.
        m_zk.getData(m_rootNode, m_childWatch, null);
    }
}
Race to create partition - specific zk node and put a watch on it .
32,066
/**
 * Converts a raw Java value (Number or String) to this numeric type's
 * internal representation.
 *
 * Numbers are first normalized (BigInteger→BigDecimal, Float→Double,
 * Byte/Short→Integer) and then converted according to this type's typeCode.
 * Strings are routed through the generic convertToType path as SQL_VARCHAR.
 *
 * @param session current session (used by the delegated converters)
 * @param a value to convert; null passes through unchanged
 * @return the converted value
 * @throws HsqlException (X_42561) for unsupported source/target combinations
 */
public Object convertToDefaultType(SessionInterface session, Object a) {
    if (a == null) {
        return a;
    }
    Type otherType;
    if (a instanceof Number) {
        // Normalize exotic Number subclasses to the four canonical ones.
        if (a instanceof BigInteger) {
            a = new BigDecimal((BigInteger) a);
        } else if (a instanceof Float) {
            a = new Double(((Float) a).doubleValue());
        } else if (a instanceof Byte) {
            a = ValuePool.getInt(((Byte) a).intValue());
        } else if (a instanceof Short) {
            a = ValuePool.getInt(((Short) a).intValue());
        }
        if (a instanceof Integer) {
            otherType = Type.SQL_INTEGER;
        } else if (a instanceof Long) {
            otherType = Type.SQL_BIGINT;
        } else if (a instanceof Double) {
            otherType = Type.SQL_DOUBLE;
        } else if (a instanceof BigDecimal) {
            otherType = Type.SQL_DECIMAL_DEFAULT;
        } else {
            throw Error.error(ErrorCode.X_42561);
        }
        switch (typeCode) {
            case Types.TINYINT :
            case Types.SQL_SMALLINT :
            case Types.SQL_INTEGER :
                return convertToInt(session, a, Types.INTEGER);
            case Types.SQL_BIGINT :
                return convertToLong(session, a);
            case Types.SQL_REAL :
            case Types.SQL_FLOAT :
            case Types.SQL_DOUBLE :
                return convertToDouble(a);
            case Types.SQL_NUMERIC :
            case Types.SQL_DECIMAL : {
                a = convertToDecimal(a);
                BigDecimal dec = (BigDecimal) a;
                // Adjust to this type's declared scale, rounding half-down.
                if (scale != dec.scale()) {
                    dec = dec.setScale(scale, BigDecimal.ROUND_HALF_DOWN);
                }
                return dec;
            }
            default :
                throw Error.error(ErrorCode.X_42561);
        }
    } else if (a instanceof String) {
        otherType = Type.SQL_VARCHAR;
    } else {
        throw Error.error(ErrorCode.X_42561);
    }
    // Only the String path reaches here; Numbers return inside the switch.
    return convertToType(session, a, otherType);
}
Converts a value to this type
32,067
/**
 * Converter from a numeric object to Double. BigDecimal input is checked to
 * be within the range representable by a double.
 *
 * @param a a Number (Double returned as-is; BigDecimal range-checked;
 *          other Numbers converted via doubleValue())
 * @return the boxed double, drawn from the ValuePool
 * @throws HsqlException (X_22003) if a BigDecimal is out of double range
 */
private static Double convertToDouble(Object a) {
    double value;
    if (a instanceof java.lang.Double) {
        return (Double) a;
    } else if (a instanceof BigDecimal) {
        BigDecimal bd = (BigDecimal) a;
        value = bd.doubleValue();
        int signum = bd.signum();
        // Overflow check: nudge the converted value by one toward the sign of
        // the original and compare. If |bd| exceeded double range, value
        // saturated and the comparison no longer matches the sign.
        BigDecimal bdd = new BigDecimal(value + signum);
        if (bdd.compareTo(bd) != signum) {
            throw Error.error(ErrorCode.X_22003);
        }
    } else {
        value = ((Number) a).doubleValue();
    }
    return ValuePool.getDouble(Double.doubleToLongBits(value));
}
Converter from a numeric object to Double . Input is checked to be within range represented by Double
32,068
/**
 * Compute a modulo b for this numeric type, returning null if either operand
 * is null and raising the SQL divide-by-zero error (X_22012) when b is zero.
 *
 * Fix: the SQL_DECIMAL zero check used {@code b.equals(0)}, which compares a
 * BigDecimal against an Integer and is therefore always false — a zero
 * divisor fell through to BigDecimal.remainder and raised an unwrapped
 * ArithmeticException instead of X_22012. Use signum() to detect zero.
 *
 * @param a dividend (a Number of this type's representation)
 * @param b divisor (same representation)
 * @return the remainder, or null if either input is null
 */
public Object mod(Object a, Object b) {
    if (a == null || b == null) {
        return null;
    }
    switch (typeCode) {
        case Types.SQL_REAL :
        case Types.SQL_FLOAT :
        case Types.SQL_DOUBLE : {
            double ad = ((Number) a).doubleValue();
            double bd = ((Number) b).doubleValue();
            if (bd == 0) {
                throw Error.error(ErrorCode.X_22012);
            }
            return ValuePool.getDouble(Double.doubleToLongBits(ad % bd));
        }
        case Types.SQL_DECIMAL : {
            // BUGFIX: was (b).equals(0) — BigDecimal never equals Integer 0.
            // NOTE(review): SQL_NUMERIC is not handled here and falls to the
            // default error; confirm whether that is intentional.
            if (((BigDecimal) b).signum() == 0) {
                throw Error.error(ErrorCode.X_22012);
            }
            return ValuePool.getBigDecimal(((BigDecimal) a).remainder((BigDecimal) b));
        }
        case Types.TINYINT :
        case Types.SQL_SMALLINT :
        case Types.SQL_INTEGER : {
            int ai = ((Number) a).intValue();
            int bi = ((Number) b).intValue();
            if (bi == 0) {
                throw Error.error(ErrorCode.X_22012);
            }
            return ValuePool.getInt(ai % bi);
        }
        case Types.SQL_BIGINT : {
            long al = ((Number) a).longValue();
            long bl = ((Number) b).longValue();
            if (bl == 0) {
                throw Error.error(ErrorCode.X_22012);
            }
            return ValuePool.getLong(al % bl);
        }
        default :
            throw Error.runtimeError(ErrorCode.U_S0500, "NumberType");
    }
}
A VoltDB extension
32,069
/**
 * Writes the specified single character into the internal buffer, growing
 * the buffer geometrically (at least doubling) when it is full.
 *
 * @param c character to append (low 16 bits are used)
 * @throws IOException if the writer has been closed
 */
public synchronized void write(int c) throws IOException {
    checkClosed();
    final int required = count + 1;
    if (required > buf.length) {
        // Double the capacity, or grow to exactly what we need if larger.
        buf = copyOf(buf, Math.max(buf.length << 1, required));
    }
    buf[count] = (char) c;
    count = required;
}
Writes the specified single character .
32,070
/**
 * If the message is an @MigratePartitionLeader system procedure invocation for
 * this partition, start migrating the partition leadership to the requested
 * host: mark this site as non-leader, record a drain checkpoint, and publish
 * the new leader's HSID (with the migrate suffix) to the ZK appointee cache.
 *
 * From this moment on, incoming transactions are treated as misrouted until
 * the new leader takes over.
 *
 * @param msg the invocation to inspect; ignored unless it names
 *            @MigratePartitionLeader
 */
private void initiateSPIMigrationIfRequested(Iv2InitiateTaskMessage msg) {
    if (!"@MigratePartitionLeader".equals(msg.getStoredProcedureName())) {
        return;
    }
    final Object[] params = msg.getParameters();
    // params[1] = target partition id, params[2] = target host id
    int pid = Integer.parseInt(params[1].toString());
    if (pid != m_partitionId) {
        tmLog.warn(String.format("@MigratePartitionLeader executed at a wrong partition %d for partition %d.",
                m_partitionId, pid));
        return;
    }
    RealVoltDB db = (RealVoltDB) VoltDB.instance();
    int hostId = Integer.parseInt(params[2].toString());
    Long newLeaderHSId = db.getCartographer().getHSIDForPartitionHost(hostId, pid);
    if (newLeaderHSId == null || newLeaderHSId == m_hsId) {
        tmLog.warn(String.format("@MigratePartitionLeader the partition leader is already on the host %d or the host id is invalid.",
                hostId));
        return;
    }
    SpScheduler scheduler = (SpScheduler) m_scheduler;
    // Checkpoint before flipping leadership so we know when in-flight txns drain.
    scheduler.checkPointMigratePartitionLeader();
    scheduler.m_isLeader = false;
    m_newLeaderHSID = newLeaderHSId;
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.STARTED;
    LeaderCache leaderAppointee = new LeaderCache(m_messenger.getZK(),
            "initiateSPIMigrationIfRequested-" + m_partitionId, VoltZK.iv2appointees);
    try {
        leaderAppointee.start(true);
        // Publish the new leader HSID with the migrate suffix so observers can
        // tell why the appointee changed.
        leaderAppointee.put(pid, LeaderCache.suffixHSIdsWithMigratePartitionLeaderRequest(newLeaderHSId));
    } catch (InterruptedException | ExecutionException | KeeperException e) {
        VoltDB.crashLocalVoltDB("fail to start MigratePartitionLeader", true, e);
    } finally {
        try {
            leaderAppointee.shutdown();
        } catch (InterruptedException e) {
            // best-effort shutdown; NOTE(review): interrupt status is dropped here
        }
    }
    tmLog.info("MigratePartitionLeader for partition " + pid + " to " + CoreUtils.hsIdToString(newLeaderHSId));
    // If all txns before the checkpoint already drained, tell the new leader now.
    notifyNewLeaderOfTxnDoneIfNeeded();
}
From this moment on , transactions are rerouted until they are correctly routed to the new leader .
32,071
/**
 * Detect an initiate-task message that arrived at a site which is not (or is
 * no longer) the partition leader, and bounce it back to the client interface
 * as "misrouted" so the transaction can be restarted against the real leader.
 *
 * @param message the incoming initiation request
 * @return true when the message was bounced (caller should not process it)
 */
private boolean checkMisroutedIv2IntiateTaskMessage(Iv2InitiateTaskMessage message) {
    // Replica-directed work is never misrouted by leadership changes.
    if (message.isForReplica()) {
        return false;
    }
    // Still leader and not in restart mode: process normally.
    if (m_scheduler.isLeader() && m_migratePartitionLeaderStatus != MigratePartitionLeaderStatus.TXN_RESTART) {
        return false;
    }
    InitiateResponseMessage response = new InitiateResponseMessage(message);
    response.setMisrouted(message.getStoredProcedureInvocation());
    response.m_sourceHSId = getHSId();
    deliver(response);
    if (tmLog.isDebugEnabled()) {
        tmLog.debug("Sending message back on:" + CoreUtils.hsIdToString(m_hsId)
                + " isLeader:" + m_scheduler.isLeader()
                + " status:" + m_migratePartitionLeaderStatus + "\n" + message);
    }
    // A bounced txn may have been the last one before the drain checkpoint.
    notifyNewLeaderOfTxnDoneIfNeeded();
    return true;
}
Bounce requests that were intended for the leader ; the client interface will restart these transactions .
32,072
/**
 * After a MigratePartitionLeader request, fragments sent to the old leader
 * site must be handled specially: if the transaction is unknown here, ask the
 * MPI to restart it; if it is a known in-flight transaction, keep processing
 * its follow-up fragments on this (previous leader) site to completion.
 *
 * @param message the incoming fragment
 * @return true when a restart response was sent (caller must not process it)
 */
private boolean checkMisroutedFragmentTaskMessage(FragmentTaskMessage message) {
    // Normal cases: we are the leader, or the fragment targets a replica.
    if (m_scheduler.isLeader() || message.isForReplica()) {
        return false;
    }
    TransactionState txnState = (((SpScheduler) m_scheduler).getTransactionState(message.getTxnId()));
    if (txnState == null) {
        // Unknown txn on a demoted leader: tell the coordinator to restart it.
        FragmentResponseMessage response = new FragmentResponseMessage(message, getHSId());
        TransactionRestartException restart = new TransactionRestartException(
                "Transaction being restarted due to MigratePartitionLeader.", message.getTxnId());
        restart.setMisrouted(true);
        response.setStatus(FragmentResponseMessage.UNEXPECTED_ERROR, restart);
        response.m_sourceHSId = getHSId();
        response.setPartitionId(m_partitionId);
        if (tmLog.isDebugEnabled()) {
            tmLog.debug("misRoutedFragMsg on site:" + CoreUtils.hsIdToString(getHSId()) + "\n" + message);
        }
        deliver(response);
        return true;
    }
    // Known txn: finish it here even though leadership moved.
    // NOTE(review): this condition is always true at this point given the
    // guard above; kept as-is to avoid changing behavior.
    if (!m_scheduler.isLeader() && !message.isForReplica()) {
        message.setExecutedOnPreviousLeader(true);
        txnState.setLeaderMigrationInvolved();
        if (tmLog.isDebugEnabled()) {
            tmLog.debug("Follow-up fragment will be processed on " + CoreUtils.hsIdToString(getHSId()) + "\n" + message);
        }
    }
    return false;
}
After MigratePartitionLeader has been requested the fragments which are sent to leader site should be restarted .
32,073
/**
 * Produce the repair log in response to a repair-log request. Idempotent.
 *
 * If the request is tied to a dead host, the response is deferred until the
 * messenger confirms the dead host's connection is fully closed; until then
 * the request is re-delivered to this mailbox every 10ms.
 *
 * @param message an Iv2RepairLogRequestMessage
 */
private void handleLogRequest(VoltMessage message) {
    Iv2RepairLogRequestMessage req = (Iv2RepairLogRequestMessage) message;
    int deadHostId = req.getDeadHostId();
    if (deadHostId != Integer.MAX_VALUE) {
        if (m_messenger.canCompleteRepair(deadHostId)) {
            // Connection is gone; stop checking and process the request.
            req.disableDeadHostCheck();
            deliver(message);
        } else {
            // Warn every 100 retries (after the first 100) while we wait.
            if (req.getRepairRetryCount() > 100 && req.getRepairRetryCount() % 100 == 0) {
                hostLog.warn("Repair Request for dead host " + deadHostId
                        + " has not been processed yet because connection has not closed");
            }
            Runnable retryRepair = new Runnable() {
                public void run() {
                    InitiatorMailbox.this.deliver(message);
                }
            };
            VoltDB.instance().scheduleWork(retryRepair, 10, -1, TimeUnit.MILLISECONDS);
        }
        return;
    }
    List<Iv2RepairLogResponseMessage> logs = m_repairLog.contents(req.getRequestId(), req.isMPIRequest());
    if (req.isMPIRequest()) {
        m_scheduler.cleanupTransactionBacklogOnRepair();
    }
    for (Iv2RepairLogResponseMessage log : logs) {
        send(message.m_sourceHSId, log);
    }
}
Produce the repair log . This is idempotent .
32,074
/**
 * Update this (new leader) site's migration status on notification from the
 * previous leader that its transactions have drained. A reset message forces
 * the status back to NONE; otherwise NONE advances to TXN_DRAINED, and
 * TXN_RESTART (restarting txns already underway) completes back to NONE.
 *
 * @param message notification from the previous leader
 */
private void setMigratePartitionLeaderStatus(MigratePartitionLeaderMessage message) {
    if (message.isStatusReset()) {
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        return;
    }
    switch (m_migratePartitionLeaderStatus) {
        case NONE:
            m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.TXN_DRAINED;
            break;
        case TXN_RESTART:
            m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
            break;
        default:
            // other states are left unchanged, matching prior behavior
            break;
    }
    tmLog.info("MigratePartitionLeader new leader " + CoreUtils.hsIdToString(m_hsId)
            + " is notified by previous leader "
            + CoreUtils.hsIdToString(message.getPriorLeaderHSID())
            + ". status:" + m_migratePartitionLeaderStatus);
}
that previous partition leader has drained its txns
32,075
/**
 * Update the migration status on the site that is becoming the new partition
 * leader. Passing false clears all migration state. Passing true either
 * completes the handoff (if the previous leader already reported its txns
 * drained, i.e. status TXN_DRAINED) or enters TXN_RESTART mode, bouncing
 * transactions until the drain notification arrives.
 *
 * @param migratePartitionLeader false to reset; true to advance the handoff
 */
public void setMigratePartitionLeaderStatus(boolean migratePartitionLeader) {
    if (!migratePartitionLeader) {
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        m_newLeaderHSID = Long.MIN_VALUE;
        return;
    }
    if (m_migratePartitionLeaderStatus == MigratePartitionLeaderStatus.TXN_DRAINED) {
        // Previous leader already drained: handoff complete.
        m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
        tmLog.info("MigratePartitionLeader transactions on previous partition leader are drained. New leader:"
                + CoreUtils.hsIdToString(m_hsId) + " status:" + m_migratePartitionLeaderStatus);
        return;
    }
    // Drain notification not seen yet: restart txns until it arrives.
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.TXN_RESTART;
    tmLog.info("MigratePartitionLeader restart txns on new leader:"
            + CoreUtils.hsIdToString(m_hsId) + " status:" + m_migratePartitionLeaderStatus);
}
the site for new partition leader
32,076
/**
 * On the previous leader: once all transactions accepted before the migrate
 * checkpoint have completed, notify the new leader so it can proceed to
 * process transactions, and clear this site's migration state.
 *
 * No-op when no migration is pending or transactions are still in flight.
 */
public void notifyNewLeaderOfTxnDoneIfNeeded() {
    // No pending migration target.
    if (m_newLeaderHSID == Long.MIN_VALUE) {
        return;
    }
    SpScheduler scheduler = (SpScheduler) m_scheduler;
    // Still draining txns accepted before the checkpoint.
    if (!scheduler.txnDoneBeforeCheckPoint()) {
        return;
    }
    MigratePartitionLeaderMessage message = new MigratePartitionLeaderMessage(m_hsId, m_newLeaderHSID);
    send(message.getNewLeaderHSID(), message);
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    // This site is no longer the leader for repair-log purposes.
    m_repairLog.setLeaderState(false);
    tmLog.info("MigratePartitionLeader previous leader " + CoreUtils.hsIdToString(m_hsId)
            + " notifies new leader " + CoreUtils.hsIdToString(m_newLeaderHSID)
            + " transactions are drained." + " status:" + m_migratePartitionLeaderStatus);
    m_newLeaderHSID = Long.MIN_VALUE;
}
Then new master can proceed to process transactions .
32,077
/**
 * Reinstall this site as partition leader, abandoning any in-progress
 * leader migration: restore leader flags on the scheduler and repair log,
 * and clear the pending new-leader HSID.
 */
public void resetMigratePartitionLeaderStatus() {
    m_scheduler.m_isLeader = true;
    m_migratePartitionLeaderStatus = MigratePartitionLeaderStatus.NONE;
    m_repairLog.setLeaderState(true);
    m_newLeaderHSID = Long.MIN_VALUE;
}
Reinstall the site as leader .
32,078
/**
 * Retrieve the Option matching the given long or short option string,
 * ignoring any leading hyphens.
 *
 * @param opt short or long option name, possibly prefixed with hyphens
 * @return the matching Option, or null if none matches
 */
private Option resolveOption(String opt) {
    final String name = Util.stripLeadingHyphens(opt);
    for (Option candidate : options) {
        if (name.equals(candidate.getOpt()) || name.equals(candidate.getLongOpt())) {
            return candidate;
        }
    }
    return null;
}
Retrieves the Option object given the long or short option name as a String .
32,079
/**
 * Retrieve any left-over non-recognized options and arguments.
 *
 * @return a newly allocated array containing the remaining arguments
 */
public String[] getArgs() {
    // toArray with a zero-length seed allocates an exactly-sized array.
    return args.toArray(new String[0]);
}
Retrieve any left - over non - recognized options and arguments
32,080
/**
 * Find the scan node on the materialized-view table and replace it with the
 * re-aggregation node for a join query. The scan node cannot be in-lined, so
 * it must appear as a child of a join node; the search recurses through the
 * plan tree until the first matching scan is found.
 *
 * @param node      plan subtree to search
 * @param reAggNode replacement re-aggregation node (null leaves the scan in place)
 * @return true once a matching scan node was found and processed
 */
public boolean processScanNodeWithReAggNode(AbstractPlanNode node, AbstractPlanNode reAggNode) {
    for (int i = 0; i < node.getChildCount(); i++) {
        AbstractPlanNode child = node.getChild(i);
        if (child instanceof AbstractScanPlanNode) {
            AbstractScanPlanNode scanNode = (AbstractScanPlanNode) child;
            // Only the scan over the MV table itself is of interest.
            if (!scanNode.getTargetTableName().equals(getMVTableName())) {
                continue;
            }
            if (reAggNode != null) {
                // Splice the re-agg node in where the scan used to hang.
                node.setAndLinkChild(i, reAggNode);
            }
            scanNode.addInlinePlanNode(m_scanInlinedProjectionNode);
            m_scanNode = scanNode;
            return true;
        } else {
            // Recurse; stop at the first successful replacement.
            boolean replaced = processScanNodeWithReAggNode(child, reAggNode);
            if (replaced) {
                return true;
            }
        }
    }
    return false;
}
Find the scan node on MV table replace it with reAggNode for join query . This scan node can not be in - lined so it should be as a child of a join node .
32,081
/**
 * Resolves all column expressions in the select list, join conditions,
 * WHERE clause, GROUP BY clause and beyond. Replaces any alias column
 * expression in the ORDER BY clause with the actual select column expression.
 */
private void resolveColumnReferences() {
    // Sequences are not allowed in DISTINCT or grouped queries.
    if (isDistinctSelect || isGrouped) {
        acceptsSequences = false;
    }
    // Non-index join conditions may only reference tables up to and
    // including the range variable they belong to (hence i + 1).
    for (int i = 0; i < rangeVariables.length; i++) {
        Expression e = rangeVariables[i].nonIndexJoinCondition;
        if (e == null) {
            continue;
        }
        resolveColumnReferencesAndAllocate(e, i + 1, false);
    }
    resolveColumnReferencesAndAllocate(queryCondition, rangeVariables.length, false);
    // Visible select columns (sequences allowed only here, when permitted).
    for (int i = 0; i < indexLimitVisible; i++) {
        resolveColumnReferencesAndAllocate(exprColumns[i], rangeVariables.length, acceptsSequences);
    }
    // Hidden columns between the visible list and the ORDER BY section.
    for (int i = indexLimitVisible; i < indexStartOrderBy; i++) {
        resolveColumnReferencesAndAllocate(exprColumns[i], rangeVariables.length, false);
    }
    resolveColumnReferencesInGroupBy();
    resolveColumnReferencesInOrderBy(sortAndSlice);
}
Resolves all column expressions in the GROUP BY clause and beyond . Replaces any alias column expression in the ORDER BY clause with the actual select column expression .
32,082
/**
 * Translate the caller's rowCount into the total number of rows needed from
 * the query, including any rows skipped at the beginning by OFFSET.
 *
 * @param session  current session (used to evaluate LIMIT/OFFSET expressions)
 * @param rowCount caller-requested maximum; 0 means unlimited
 * @return total rows to fetch, saturating at Integer.MAX_VALUE
 */
private int getMaxRowCount(Session session, int rowCount) {
    int limitStart = getLimitStart(session);
    int limitCount = getLimitCount(session, rowCount);
    if (simpleLimit) {
        if (rowCount == 0) {
            rowCount = limitCount;
        }
        // Add the skipped prefix, guarding against int overflow.
        if (rowCount > Integer.MAX_VALUE - limitStart) {
            rowCount = Integer.MAX_VALUE;
        } else {
            rowCount += limitStart;
        }
    } else {
        // Non-simple limits must be applied after the full result is built,
        // except a constant LIMIT 0 which needs no rows at all.
        rowCount = Integer.MAX_VALUE;
        if (limitCount == 0) {
            rowCount = 0;
        }
    }
    return rowCount;
}
translate the rowCount into total number of rows needed from query including any rows skipped at the beginning
32,083
/**
 * Debug helper: dump the exprColumns list for this query specification to
 * stdout, annotating the section boundaries (visible columns, order-by,
 * aggregates).
 *
 * @param header label printed at the top of the dump
 */
protected void dumpExprColumns(String header) {
    System.out.println("\n\n*********************************************");
    System.out.println(header);
    try {
        System.out.println(getSQL());
    } catch (Exception e) {
        // best-effort: SQL text may not be reproducible; skip it
    }
    for (int col = 0; col < exprColumns.length; ++col) {
        if (col == 0) {
            System.out.println("Visible columns:");
        }
        if (col == indexStartOrderBy) {
            System.out.println("start order by:");
        }
        if (col == indexStartAggregates) {
            System.out.println("start aggregates:");
        }
        if (col == indexLimitVisible) {
            System.out.println("After limit of visible columns:");
        }
        System.out.println(col + ": " + exprColumns[col]);
    }
    System.out.println("\n\n");
}
Dumps the exprColumns list for this query specification . Writes to stdout .
32,084
/**
 * Used to update EE plan-cache stats without changing tracked planning time.
 *
 * @param eeCacheSize current number of entries in the EE cache (level 1)
 * @param hits        hits to add since the last update
 * @param misses      misses to add since the last update
 * @param partitionId partition these stats belong to
 */
public void updateEECacheStats(long eeCacheSize, long hits, long misses, int partitionId) {
    m_cache1Level = eeCacheSize; // absolute level, not a delta
    m_cache1Hits += hits;
    m_cacheMisses += misses;
    m_invocations += hits + misses;
    m_partitionId = partitionId;
}
Used to update EE cache stats without changing tracked time
32,085
/**
 * Called after planning (or failing to plan). Records elapsed planning time
 * and cache usage statistics.
 *
 * @param cache1Size  current level-1 cache size
 * @param cache2Size  current level-2 cache size
 * @param cacheUse    which cache (if any) satisfied this planning attempt
 * @param partitionId partition the stats belong to
 */
public void endStatsCollection(long cache1Size, long cache2Size, CacheUse cacheUse, long partitionId) {
    if (m_currentStartTime != null) {
        long delta = System.nanoTime() - m_currentStartTime;
        if (delta < 0) {
            // Clock went backwards; only log when the skew exceeds a second.
            if (Math.abs(delta) > 1000000000) {
                log.info("Planner statistics recorded a negative planning time larger than one second: " + delta);
            }
        } else {
            m_totalPlanningTime += delta;
            m_minPlanningTime = Math.min(delta, m_minPlanningTime);
            m_maxPlanningTime = Math.max(delta, m_maxPlanningTime);
            // "last" min/max are per-interval and reset by updateStatsRow.
            m_lastMinPlanningTime = Math.min(delta, m_lastMinPlanningTime);
            m_lastMaxPlanningTime = Math.max(delta, m_lastMaxPlanningTime);
        }
        m_currentStartTime = null;
    }
    m_cache1Level = cache1Size;
    m_cache2Level = cache2Size;
    switch (cacheUse) {
        case HIT1:
            m_cache1Hits++;
            break;
        case HIT2:
            m_cache2Hits++;
            break;
        case MISS:
            m_cacheMisses++;
            break;
        case FAIL:
            m_failures++;
            break;
    }
    m_invocations++;
    m_partitionId = partitionId;
}
Called after planning or failing to plan . Records timer and cache stats .
32,086
/**
 * Update the rowValues array with the latest statistical information.
 * Overrides the superclass version, which must also be called so it can
 * populate its own columns.
 *
 * In interval mode, totals are converted to deltas since the previous call
 * and the per-interval trackers are reset.
 *
 * @param rowKey    key identifying the stats row
 * @param rowValues output array, indexed via columnNameToIndex
 */
protected void updateStatsRow(Object rowKey, Object rowValues[]) {
    super.updateStatsRow(rowKey, rowValues);
    rowValues[columnNameToIndex.get("PARTITION_ID")] = m_partitionId;
    long totalTimedExecutionTime = m_totalPlanningTime;
    long minExecutionTime = m_minPlanningTime;
    long maxExecutionTime = m_maxPlanningTime;
    long cache1Level = m_cache1Level;
    long cache2Level = m_cache2Level;
    long cache1Hits = m_cache1Hits;
    long cache2Hits = m_cache2Hits;
    long cacheMisses = m_cacheMisses;
    long failureCount = m_failures;
    if (m_interval) {
        // Report deltas since the previous interval and roll the trackers.
        totalTimedExecutionTime = m_totalPlanningTime - m_lastTimedPlanningTime;
        m_lastTimedPlanningTime = m_totalPlanningTime;
        minExecutionTime = m_lastMinPlanningTime;
        maxExecutionTime = m_lastMaxPlanningTime;
        m_lastMinPlanningTime = Long.MAX_VALUE;
        m_lastMaxPlanningTime = Long.MIN_VALUE;
        cache1Level = m_cache1Level - m_lastCache1Level;
        m_lastCache1Level = m_cache1Level;
        cache2Level = m_cache2Level - m_lastCache2Level;
        m_lastCache2Level = m_cache2Level;
        cache1Hits = m_cache1Hits - m_lastCache1Hits;
        m_lastCache1Hits = m_cache1Hits;
        cache2Hits = m_cache2Hits - m_lastCache2Hits;
        m_lastCache2Hits = m_cache2Hits;
        cacheMisses = m_cacheMisses - m_lastCacheMisses;
        m_lastCacheMisses = m_cacheMisses;
        failureCount = m_failures - m_lastFailures;
        m_lastFailures = m_failures;
        m_lastInvocations = m_invocations;
    }
    rowValues[columnNameToIndex.get(VoltSystemProcedure.CNAME_SITE_ID)] = m_siteId;
    rowValues[columnNameToIndex.get("PARTITION_ID")] = m_partitionId;
    rowValues[columnNameToIndex.get("CACHE1_LEVEL")] = cache1Level;
    rowValues[columnNameToIndex.get("CACHE2_LEVEL")] = cache2Level;
    rowValues[columnNameToIndex.get("CACHE1_HITS")] = cache1Hits;
    rowValues[columnNameToIndex.get("CACHE2_HITS")] = cache2Hits;
    rowValues[columnNameToIndex.get("CACHE_MISSES")] = cacheMisses;
    rowValues[columnNameToIndex.get("PLAN_TIME_MIN")] = minExecutionTime;
    rowValues[columnNameToIndex.get("PLAN_TIME_MAX")] = maxExecutionTime;
    // Average over the sample count; guard the empty-sample case.
    if (getSampleCount() != 0) {
        rowValues[columnNameToIndex.get("PLAN_TIME_AVG")] = (totalTimedExecutionTime / getSampleCount());
    } else {
        rowValues[columnNameToIndex.get("PLAN_TIME_AVG")] = 0L;
    }
    rowValues[columnNameToIndex.get("FAILURES")] = failureCount;
}
Update the rowValues array with the latest statistical information . This method is overrides the super class version which must also be called so that it can update its columns .
32,087
/**
 * Append an html bootstrap label tag with our custom css class to the builder.
 * Spaces in the text are replaced with underscores for the css class name
 * (css classes cannot contain spaces); the visible text is left unchanged.
 *
 * @param sb    destination builder
 * @param color optional bootstrap color suffix (e.g. "info"); null for none
 * @param text  tag text, also used (underscored) as the l- class suffix
 */
static void tag(StringBuilder sb, String color, String text) {
    sb.append("<span class='label label");
    if (color != null) {
        sb.append("-").append(color);
    }
    sb.append(" l-")
      .append(text.replace(' ', '_'))
      .append("'>")
      .append(text)
      .append("</span>");
}
Make an html bootstrap tag with our custom css class .
32,088
/**
 * Generate the HTML catalog report from a newly compiled VoltDB catalog.
 *
 * @param catalog      compiled catalog to report on
 * @param minHeap      minimum heap estimate fed into the stats section
 * @param isPro        whether this is the enterprise edition
 * @param hostCount    cluster host count
 * @param sitesPerHost sites per host
 * @param kfactor      replication k-factor
 * @param warnings     compiler feedback to surface in the stats section
 * @param autoGenDDL   canonical DDL to embed (HTML-escaped)
 * @return the filled-in HTML report
 * @throws IOException if the bundled template cannot be read
 */
public static String report(Catalog catalog, long minHeap, boolean isPro,
                            int hostCount, int sitesPerHost, int kfactor,
                            ArrayList<Feedback> warnings, String autoGenDDL)
        throws IOException {
    // Warm the platform-properties cache on a background thread so the
    // ##PLATFORM## substitution below doesn't block on first access.
    new Thread() {
        @Override
        public void run() {
            PlatformProperties.getPlatformProperties();
        }
    }.start();

    URL templateUrl = Resources.getResource(ReportMaker.class, "template.html");
    String html = Resources.toString(templateUrl, Charsets.UTF_8);

    Cluster cluster = catalog.getClusters().get("cluster");
    assert(cluster != null);
    Database db = cluster.getDatabases().get("database");
    assert(db != null);

    // Fill in each template placeholder in turn.
    html = html.replace("##STATS##", getStatsHTML(db, minHeap, warnings));
    html = html.replace("##PROCS##", generateProceduresTable(db.getTables(), db.getProcedures()));
    html = html.replace("##SCHEMA##", generateSchemaTable(db));

    DatabaseSizes sizes = CatalogSizing.getCatalogSizes(
            db, DrRoleType.XDCR.value().equals(cluster.getDrrole()));
    html = html.replace("##SIZES##", generateSizeTable(sizes));
    html = html.replace("##CLUSTERCONFIG##",
            generateClusterConfiguration(isPro, hostCount, sitesPerHost, kfactor));
    html = html.replace("##SIZESUMMARY##", generateSizeSummary(sizes));
    html = html.replace("##RECOMMENDEDSERVERSETTINGS##", generateRecommendedServerSettings(sizes));
    html = html.replace("##PLATFORM##", PlatformProperties.getPlatformProperties().toHTML());
    html = html.replace("##VERSION##", VoltDB.instance().getVersionString());
    html = html.replace("##DDL##", escapeHtml4(autoGenDDL));

    DateFormat df = new SimpleDateFormat("d MMM yyyy HH:mm:ss z");
    html = html.replace("##TIMESTAMP##", df.format(m_timestamp));

    // Embed a hex-encoded version/timestamp token into the tracking URL.
    String trackingToken = Encoder.hexEncode(
            VoltDB.instance().getVersionString() + "," + System.currentTimeMillis());
    html = html.replace("get.py?a=KEY&", String.format("get.py?a=%s&", trackingToken));
    return html;
}
Generate the HTML catalog report from a newly compiled VoltDB catalog
32,089
public static String liveReport ( ) { byte [ ] reportbytes = VoltDB . instance ( ) . getCatalogContext ( ) . getFileInJar ( VoltCompiler . CATLOG_REPORT ) ; String report = new String ( reportbytes , Charsets . UTF_8 ) ; report = report . replace ( "<!--##RESOURCES" , "" ) ; report = report . replace ( "##RESOURCES , "" ) ; PlatformProperties pp = PlatformProperties . getPlatformProperties ( ) ; String ppStr = "<h4>Cluster Platform</h4>\n<p>" + pp . toHTML ( ) + "</p><br/>\n" ; report = report . replace ( "<!--##PLATFORM2## , ppStr ) ; if ( VoltDB . instance ( ) . getConfig ( ) . m_isEnterprise ) { report = report . replace ( "&b=r&" , "&b=e&" ) ; } else { report = report . replace ( "&b=r&" , "&b=c&" ) ; } return report ; }
Find the pre-compiled catalog report in the jarfile and modify it for use in the built-in web portal.
32,090
/**
 * Turn off the client interface as fast as possible.
 *
 * @return false only if the client interface exists and refused to cease
 *         public-facing traffic; true otherwise (including when there is
 *         no instance or no client interface to shut down).
 */
private static boolean turnOffClientInterface() {
    VoltDBInterface vdb = instance();
    if (vdb == null) {
        return true; // nothing running, nothing to turn off
    }
    ClientInterface ci = vdb.getClientInterface();
    if (ci == null) {
        return true; // no client interface configured
    }
    return ci.ceaseAllPublicFacingTrafficImmediately();
}
turn off client interface as fast as possible
32,091
/**
 * Send an SNMP trap crash notification. Best-effort only: every failure
 * mode (blank message, no instance, no sender, sender throwing) is
 * tolerated so it can never escalate the crash path itself.
 *
 * @param msg crash description; blank/null messages are ignored
 */
private static void sendCrashSNMPTrap(String msg) {
    if (msg == null || msg.trim().isEmpty()) {
        return;
    }
    VoltDBInterface vdb = instance();
    SnmpTrapSender snmp = (vdb == null) ? null : vdb.getSnmpTrapSender();
    if (snmp == null) {
        return;
    }
    try {
        snmp.crash(msg);
    } catch (Throwable t) {
        // Deliberately swallow: a failed trap must not interfere with crashing.
        new VoltLogger("HOST").warn("failed to issue a crash SNMP trap", t);
    }
}
send a SNMP trap crash notification
32,092
/**
 * Exit the process with an error message, optionally with a stack trace.
 * Also notify all connected peers (via a poison pill) that this node is
 * going down before crashing locally.
 *
 * @param errMsg     message recorded, broadcast, and passed to the local crash
 * @param stackTrace whether the local crash should include a stack trace
 * @param t          optional cause forwarded to crashLocalVoltDB
 */
public static void crashGlobalVoltDB(String errMsg, boolean stackTrace, Throwable t) {
    // Record crash state first so tests/diagnostics see it even if the
    // shutdown sequence below fails.
    wasCrashCalled = true;
    crashMessage = errMsg;
    if (ignoreCrash) {
        // Test hook: simulate the crash without taking down the process.
        throw new AssertionError("Faux crash of VoltDB successful.");
    }
    sendCrashSNMPTrap(errMsg);
    try {
        if (!turnOffClientInterface()) {
            // Early return still crashes locally via the finally block below.
            return;
        }
        // Tell peers we are going down, then give the message time to flush.
        instance().getHostMessenger().sendPoisonPill(errMsg);
        Thread.sleep(500);
    } catch (Exception e) {
        // Best effort: log and still pause briefly before the local crash.
        e.printStackTrace();
        try {
            Thread.sleep(500);
        } catch (InterruptedException e2) {
        }
    } finally {
        // Always terminate this node, whatever happened above.
        crashLocalVoltDB(errMsg, stackTrace, t);
    }
}
Exit the process with an error message optionally with a stack trace . Also notify all connected peers that the node is going down .
32,093
/**
 * Entry point for the VoltDB server process.
 *
 * @param args command-line arguments parsed into a Configuration
 */
public static void main(String[] args) {
    Configuration config = new Configuration(args);
    try {
        if (!config.validate()) {
            // Bad configuration: bail out with a non-zero exit code.
            System.exit(-1);
        } else if (config.m_startAction == StartAction.GET) {
            // "get" action runs the CLI path instead of a server.
            cli(config);
        } else {
            initialize(config);
            instance().run();
        }
    } catch (OutOfMemoryError e) {
        String errmsg = "VoltDB Main thread: ran out of Java memory. This node will shut down.";
        VoltDB.crashLocalVoltDB(errmsg, false, e);
    }
}
Entry point for the VoltDB server process .
32,094
/**
 * Select the replication interface from m_config: a configured
 * m_drInterface wins; otherwise fall back to m_externalInterface,
 * or the empty string if neither is set.
 *
 * @return the interface string to use for DR (never null)
 */
public static String getDefaultReplicationInterface() {
    String drInterface = m_config.m_drInterface;
    if (drInterface != null && !drInterface.isEmpty()) {
        return drInterface;
    }
    String externalInterface = m_config.m_externalInterface;
    return (externalInterface == null) ? "" : externalInterface;
}
Selects the specified m_drInterface over a specified m_externalInterface from m_config.
32,095
/**
 * Removes all mappings from this map whose counter values are zero.
 * Null-valued entries are left untouched.
 */
public void removeAllZeros() {
    // The entrySet view supports removal, so removeIf drops the
    // zero-valued counters in a single pass over the map.
    map.entrySet().removeIf(entry -> {
        AtomicLong counter = entry.getValue();
        return counter != null && counter.get() == 0L;
    });
}
Removes all mappings from this map whose values are zero .
32,096
/**
 * A version of the similar API from VoltDB clients, but for
 * non-transactional procedures. Runs a single-partition procedure on
 * every partition that exists at the time it is called.
 *
 * @param procedureName procedure to run on each partition
 * @param params        arguments forwarded to each invocation
 * @return future completing with one response per partition
 */
public CompletableFuture<ClientResponseWithPartitionKey[]> callAllPartitionProcedure(String procedureName, Object... params) {
    // Pure delegation: the runner handles the fan-out across partitions.
    final CompletableFuture<ClientResponseWithPartitionKey[]> perPartitionResponses =
            m_runner.callAllPartitionProcedure(procedureName, params);
    return perPartitionResponses;
}
A version of the similar API from VoltDB clients but for non-transactional procedures. Runs a single-partition procedure on every partition that exists at the time it is called.
32,097
/**
 * End users should not call this method. Obtain a reference to the table's
 * underlying buffer. The returned reference's position and mark are
 * independent of the table's buffer position and mark. The returned buffer
 * has no mark and is at position 0.
 *
 * @param vt table whose backing buffer is exposed
 * @return an independent, rewound view of the table's buffer
 */
public static ByteBuffer getTableDataReference(VoltTable vt) {
    // duplicate() shares content but gives independent position/limit/mark;
    // rewind() resets position to 0 and discards any mark.
    final ByteBuffer view = vt.m_buffer.duplicate();
    view.rewind();
    return view;
}
End users should not call this method. Obtain a reference to the table's underlying buffer. The returned reference's position and mark are independent of the table's buffer position and mark. The returned buffer has no mark and is at position 0.
32,098
/**
 * Reserves the next ticket and returns the wait time (in microseconds)
 * that the caller must wait for.
 *
 * Note the asymmetry: the caller waits only until the PREVIOUSLY computed
 * nextFreeTicketMicros, while the cost of this request is pushed onto the
 * NEXT caller by advancing nextFreeTicketMicros past it.
 *
 * @param requiredPermits number of permits being requested
 * @param nowMicros       current time in microseconds
 * @return microseconds the caller must wait before proceeding (>= 0)
 */
private long reserveNextTicket(double requiredPermits, long nowMicros) {
    // Bring storedPermits / nextFreeTicketMicros up to date with "now".
    resync(nowMicros);
    // Time the caller must wait for the previously reserved ticket to free up.
    long microsToNextFreeTicket = Math.max(0, nextFreeTicketMicros - nowMicros);
    // Satisfy as much of the request as possible from stored permits; the
    // remainder ("fresh" permits) must be paid for at the stable rate.
    double storedPermitsToSpend = Math.min(requiredPermits, this.storedPermits);
    double freshPermits = requiredPermits - storedPermitsToSpend;
    long waitMicros = storedPermitsToWaitTime(this.storedPermits, storedPermitsToSpend)
            + (long) (freshPermits * stableIntervalMicros);
    // Push this request's cost onto the next ticket, then consume the permits.
    this.nextFreeTicketMicros = nextFreeTicketMicros + waitMicros;
    this.storedPermits -= storedPermitsToSpend;
    return microsToNextFreeTicket;
}
Reserves next ticket and returns the wait time that the caller must wait for .
32,099
/**
 * Add the specified option group.
 *
 * Each member option is registered individually (marked non-required,
 * since the group itself enforces any requirement) and mapped back to
 * its group for later lookup.
 *
 * @param group the option group to add
 * @return this Options instance, for chaining
 */
public Options addOptionGroup(OptionGroup group) {
    if (group.isRequired()) {
        requiredOpts.add(group);
    }
    for (final Option member : group.getOptions()) {
        // Requirement is tracked at group level, not per option.
        member.setRequired(false);
        addOption(member);
        optionGroups.put(member.getKey(), group);
    }
    return this;
}
Add the specified option group .