idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
31,800
// Writes one table row: delegates to the five-argument overload, using the
// length of the types array as the column count and no extra row metadata.
public void writeData ( Object [ ] data , Type [ ] types ) { writeData ( types . length , types , data , null , null ) ; }
This method is called to write data for a table row .
31,801
// Appends a SQLWarning to this connection's warning chain, guarded by
// rootWarning_mutex. SQLWarning.setNextWarning adds to the end of the chain,
// so previously recorded warnings are preserved.
void addWarning ( SQLWarning w ) { synchronized ( rootWarning_mutex ) { if ( rootWarning == null ) { rootWarning = w ; } else { rootWarning . setNextWarning ( w ) ; } } }
Adds another SQLWarning to this Connection object's warning chain.
31,802
// Resets the underlying session so this connection can be reused (typically
// when it is returned to a connection pool). Any HsqlException is rethrown as
// a SQLException with SQLSTATE 08006 (connection failure), preserving the cause.
public void reset ( ) throws SQLException { try { this . sessionProxy . resetSession ( ) ; } catch ( HsqlException e ) { throw Util . sqlException ( ErrorCode . X_08006 , e . getMessage ( ) , e ) ; } }
Resets this connection so it can be used again . Used when connections are returned to a connection pool .
31,803
// Called from nativeSQL when the '{' opening a JDBC escape sequence is found
// at index i. Blanks the '{' in sb, skips whitespace, then handles the escape
// keyword: "fn"/"oj"/"ts" and "d"/"t" are blanked out (2 or 1 chars), while
// "call", "?= call" (the "?=" is blanked, "call" kept) and "escape" are kept
// and skipped over. Returns the index of the last character consumed. For an
// unrecognized keyword, i is decremented first, so the error substring for
// JDBC_CONNECTION_NATIVE_SQL starts one character before the keyword.
private int onStartEscapeSequence ( String sql , StringBuffer sb , int i ) throws SQLException { sb . setCharAt ( i ++ , ' ' ) ; i = StringUtil . skipSpaces ( sql , i ) ; if ( sql . regionMatches ( true , i , "fn " , 0 , 3 ) || sql . regionMatches ( true , i , "oj " , 0 , 3 ) || sql . regionMatches ( true , i , "ts " , 0 , 3 ) ) { sb . setCharAt ( i ++ , ' ' ) ; sb . setCharAt ( i ++ , ' ' ) ; } else if ( sql . regionMatches ( true , i , "d " , 0 , 2 ) || sql . regionMatches ( true , i , "t " , 0 , 2 ) ) { sb . setCharAt ( i ++ , ' ' ) ; } else if ( sql . regionMatches ( true , i , "call " , 0 , 5 ) ) { i += 4 ; } else if ( sql . regionMatches ( true , i , "?= call " , 0 , 8 ) ) { sb . setCharAt ( i ++ , ' ' ) ; sb . setCharAt ( i ++ , ' ' ) ; i += 5 ; } else if ( sql . regionMatches ( true , i , "escape " , 0 , 7 ) ) { i += 6 ; } else { i -- ; throw Util . sqlException ( Error . error ( ErrorCode . JDBC_CONNECTION_NATIVE_SQL , sql . substring ( i ) ) ) ; } return i ; }
Called from within nativeSQL when the start of a JDBC escape sequence is encountered.
31,804
// Advances the sequence in response to a user-supplied value. If the value
// equals the current value, the sequence steps by one increment. Otherwise,
// when the value lies beyond currValue in the increment's direction, currValue
// jumps to the next increment-aligned position strictly past the value
// (integer division rounds the jump to a whole number of increments).
// Returns the supplied value unchanged.
synchronized long userUpdate ( long value ) { if ( value == currValue ) { currValue += increment ; return value ; } if ( increment > 0 ) { if ( value > currValue ) { currValue += ( ( value - currValue + increment ) / increment ) * increment ; } } else { if ( value < currValue ) { currValue += ( ( value - currValue + increment ) / increment ) * increment ; } } return value ; }
Advances the sequence past a user-supplied value and returns that value.
31,805
// System-driven variant of userUpdate, needed for text tables (memory tables
// are instead reset by the logged/scripted RESTART WITH). Moves currValue to
// value + increment whenever value is at or beyond currValue in the
// increment's direction. Performs no range checks, since values may have
// overridden the sequence defaults. Returns the supplied value unchanged.
synchronized long systemUpdate ( long value ) { if ( value == currValue ) { currValue += increment ; return value ; } if ( increment > 0 ) { if ( value > currValue ) { currValue = value + increment ; } } else { if ( value < currValue ) { currValue = value + increment ; } } return value ; }
Updates are necessary for text tables. For memory tables, the logged and scripted RESTART WITH will override this. No range checks are performed, as values may have overridden the sequence defaults.
31,806
// Principal getter for the next sequence value: returns currValue and advances
// the sequence. When the next step would exceed maxValue (ascending) or fall
// below minValue (descending), the sequence wraps to the opposite bound if
// isCycle is set; otherwise limitReached is set so the NEXT call throws
// X_2200H (the current value is still returned this time).
// NOTE(review): in the descending non-cycling branch nextValue is set to
// minValue rather than maxValue — asymmetric with the ascending branch; the
// value is never observed once limitReached is set, but confirm intent.
synchronized public long getValue ( ) { if ( limitReached ) { throw Error . error ( ErrorCode . X_2200H ) ; } long nextValue ; if ( increment > 0 ) { if ( currValue > maxValue - increment ) { if ( isCycle ) { nextValue = minValue ; } else { limitReached = true ; nextValue = minValue ; } } else { nextValue = currValue + increment ; } } else { if ( currValue < minValue - increment ) { if ( isCycle ) { nextValue = maxValue ; } else { limitReached = true ; nextValue = minValue ; } } else { nextValue = currValue + increment ; } } long result = currValue ; currValue = nextValue ; return result ; }
principal getter for the next sequence value
31,807
/**
 * Resets the sequence to a new initial value.
 *
 * @param value the new start value; must lie within [minValue, maxValue]
 * @throws HsqlException with SQLSTATE 42597 when the value is out of range
 */
synchronized public void reset ( long value ) {
    boolean outOfRange = (value < minValue) || (value > maxValue);
    if (outOfRange) {
        throw Error.error(ErrorCode.X_42597);
    }
    // Start, current and last value all move to the new initial value.
    lastValue = value;
    currValue = value;
    startValue = value;
}
reset to new initial value
31,808
/**
 * Compares two SHA-1 hashes byte-by-byte as UNSIGNED values, giving standard
 * lexicographic byte-string order. A null argument sorts before any non-null
 * wrapper.
 *
 * Fix: the original subtracted raw (signed) bytes, so bytes in 0x80-0xff
 * compared as negative and sorted before 0x00-0x7f — a surprising,
 * non-lexicographic order (the original author's comment admitted as much).
 * Masking with 0xff compares the bytes as values in [0, 255]; the difference
 * of two such values cannot overflow an int.
 *
 * @param arg0 the wrapper to compare against; may be null
 * @return negative, zero or positive per the Comparable contract
 */
public int compareTo ( Sha1Wrapper arg0 ) {
    if (arg0 == null) {
        return 1;
    }
    for (int i = 0; i < 20; i++) {
        int cmp = (hashBytes[i] & 0xff) - (arg0.hashBytes[i] & 0xff);
        if (cmp != 0) {
            return cmp;
        }
    }
    return 0;
}
Not totally sure if this is a sensible ordering
31,809
/**
 * Appends {@code spaces} copies of SPACE to the given StringBuilder.
 * A non-positive count appends nothing.
 */
private static void appendSpaces ( final StringBuilder sb , final int spaces ) {
    int remaining = spaces;
    while (remaining > 0) {
        sb.append(SPACE);
        remaining--;
    }
}
Appends the required number of spaces to the StringBuilder .
31,810
// Fills in partition metadata (table, column, parameter index) on a catalog
// procedure. Resolves the first partition clause and marks the procedure
// single-partition; when the data describes a two-partition procedure, also
// resolves the second clause into the *2 fields and clears the
// single-partition flag. Throws VoltCompilerException when resolution fails.
public static void setCatalogProcedurePartitionInfo ( VoltCompiler compiler , Database db , Procedure procedure , ProcedurePartitionData partitionData ) throws VoltCompilerException { ParititonDataReturnType partitionClauseData = resolvePartitionData ( compiler , db , procedure , partitionData . m_tableName , partitionData . m_columnName , partitionData . m_paramIndex ) ; procedure . setPartitionparameter ( partitionClauseData . partitionParamIndex ) ; procedure . setPartitioncolumn ( partitionClauseData . partitionColumn ) ; procedure . setPartitiontable ( partitionClauseData . partitionTable ) ; procedure . setSinglepartition ( true ) ; if ( partitionData . isTwoPartitionProcedure ( ) ) { partitionClauseData = resolvePartitionData ( compiler , db , procedure , partitionData . m_tableName2 , partitionData . m_columnName2 , partitionData . m_paramIndex2 ) ; procedure . setPartitionparameter2 ( partitionClauseData . partitionParamIndex ) ; procedure . setPartitioncolumn2 ( partitionClauseData . partitionColumn ) ; procedure . setPartitiontable2 ( partitionClauseData . partitionTable ) ; procedure . setSinglepartition ( false ) ; } }
Set partition table column and parameter index for catalog procedure
31,811
// Creates a KafkaConsumer and wraps it in a runner. The thread's context
// classloader is temporarily switched to this class's loader so Kafka's
// reflective class loading (deserializers, partition assignors) resolves
// against this module rather than the caller's context; the previous loader
// is always restored in the finally block.
private KafkaInternalConsumerRunner createConsumerRunner ( Properties properties ) throws Exception { ClassLoader previous = Thread . currentThread ( ) . getContextClassLoader ( ) ; Thread . currentThread ( ) . setContextClassLoader ( getClass ( ) . getClassLoader ( ) ) ; try { Consumer < ByteBuffer , ByteBuffer > consumer = new KafkaConsumer < > ( properties ) ; return new KafkaInternalConsumerRunner ( this , m_config , consumer ) ; } finally { Thread . currentThread ( ) . setContextClassLoader ( previous ) ; } }
Create a Kafka consumer and runner .
31,812
/**
 * Creates a schema owned by the given grantee and registers it in the schema
 * map under its name. System schema names are rejected up front.
 */
void createSchema ( HsqlName name , Grantee owner ) {
    SqlInvariants.checkSchemaNameNotSystem(name.name);
    Schema created = new Schema(name, owner);
    schemaMap.add(name.name, created);
}
Creates a schema belonging to the given grantee .
31,813
/**
 * Resolves a schema name to its HsqlName. A null name yields the default
 * schema's name; INFORMATION_SCHEMA is special-cased to its invariant name;
 * any other unknown name throws SQLSTATE 3F000 (invalid schema name).
 */
public HsqlName getSchemaHsqlName ( String name ) {
    if (name == null) {
        return defaultSchemaHsqlName;
    }
    if (SqlInvariants.INFORMATION_SCHEMA.equals(name)) {
        return SqlInvariants.INFORMATION_SCHEMA_HSQLNAME;
    }
    Object entry = schemaMap.get(name);
    if (entry == null) {
        throw Error.error(ErrorCode.X_3F000, name);
    }
    return ((Schema) entry).name;
}
If schemaName is null return the default schema name else return the HsqlName object for the schema . If schemaName does not exist throw .
31,814
/**
 * Returns true when the given grantee is the authorization (owner) of at
 * least one schema.
 */
boolean isSchemaAuthorisation ( Grantee grantee ) {
    for (Iterator schemas = allSchemaNameIterator(); schemas.hasNext(); ) {
        String schemaName = (String) schemas.next();
        if (grantee.equals(toSchemaOwner(schemaName))) {
            return true;
        }
    }
    return false;
}
Returns true if the grantee is the authorization (owner) of any schema.
31,815
/**
 * Drops every schema whose authorization is the given grantee.
 *
 * @param cascade passed through to dropSchema to drop contained objects
 */
void dropSchemas ( Grantee grantee , boolean cascade ) {
    HsqlArrayList owned = getSchemas(grantee);
    for (int i = 0; i < owned.size(); i++) {
        Schema schema = (Schema) owned.get(i);
        dropSchema(schema.name.name, cascade);
    }
}
drop all schemas with the given authorisation
31,816
/**
 * Returns references to all non-system tables and views registered with this
 * Database, gathered across every schema.
 */
public HsqlArrayList getAllTables ( ) {
    HsqlArrayList result = new HsqlArrayList();
    for (Iterator schemas = allSchemaNameIterator(); schemas.hasNext(); ) {
        String schemaName = (String) schemas.next();
        HashMappedList tables = getTables(schemaName);
        result.addAll(tables.values());
    }
    return result;
}
Returns an HsqlArrayList containing references to all non - system tables and views . This includes all tables and views registered with this Database .
31,817
// Resolves a table or view name for the session. Lookup order: the session's
// own temp tables (only when no schema was given), then user tables in the
// resolved schema, then system tables when the schema is INFORMATION_SCHEMA
// and dbInfo is available. Temp tables created in other sessions are never
// returned. Throws X_42501 (object not found) when nothing matches.
public Table getTable ( Session session , String name , String schema ) { Table t = null ; if ( schema == null ) { t = findSessionTable ( session , name , schema ) ; } if ( t == null ) { schema = session . getSchemaName ( schema ) ; t = findUserTable ( session , name , schema ) ; } if ( t == null ) { if ( SqlInvariants . INFORMATION_SCHEMA . equals ( schema ) && database . dbInfo != null ) { t = database . dbInfo . getSystemTable ( session , name ) ; } } if ( t == null ) { throw Error . error ( ErrorCode . X_42501 , name ) ; } return t ; }
Returns the specified user - defined table or view visible within the context of the specified Session or any system table of the given name . It excludes any temp tables created in other Sessions . Throws if the table does not exist in the context .
31,818
/**
 * Returns the named user-defined table or view visible to the session in the
 * given schema, excluding system tables and other sessions' temp tables.
 *
 * @throws HsqlException X_42501 when the table does not exist in the context
 */
public Table getUserTable ( Session session , String name , String schema ) {
    Table found = findUserTable(session, name, schema);
    if (found != null) {
        return found;
    }
    throw Error.error(ErrorCode.X_42501, name);
}
Returns the specified user - defined table or view visible within the context of the specified Session . It excludes system tables and any temp tables created in different Sessions . Throws if the table does not exist in the context .
31,819
/**
 * Looks up the named user-defined table or view in the given schema, checking
 * the session's local (temp) tables first when a session is supplied. Returns
 * null when the schema or the table does not exist; never returns system
 * tables.
 */
public Table findUserTable ( Session session , String name , String schemaName ) {
    Schema schema = (Schema) schemaMap.get(schemaName);
    if (schema == null) {
        return null;
    }
    if (session != null) {
        Table local = session.getLocalTable(name);
        if (local != null) {
            return local;
        }
    }
    int index = schema.tableList.getIndex(name);
    return (index == -1) ? null : (Table) schema.tableList.get(index);
}
Returns the specified user - defined table or view visible within the context of the specified schema . It excludes system tables . Returns null if the table does not exist in the context .
31,820
// Returns the session's own temp table of the given name, or null if absent.
// NOTE(review): schemaName is accepted but ignored — lookup is purely by name;
// presumably kept for signature symmetry with the other find* methods.
public Table findSessionTable ( Session session , String name , String schemaName ) { return session . findSessionTable ( name ) ; }
Returns the specified session context table . Returns null if the table does not exist in the context .
31,821
/**
 * Drops a user-defined table or view from this Database. The session's work
 * is committed first; views are removed as schema objects, tables go through
 * dropTable.
 */
void dropTableOrView ( Session session , Table table , boolean cascade ) {
    session.commit(false);
    if (table.isView()) {
        removeSchemaObject(table.getName(), cascade);
        return;
    }
    dropTable(session, table, cascade);
}
Drops the specified user - defined view or table from this Database object .
31,822
/**
 * Returns the position of the table or view within its schema's table list,
 * or -1 when the schema is not registered (or the table is absent).
 */
int getTableIndex ( Table table ) {
    Object entry = schemaMap.get(table.getSchemaName().name);
    if (entry == null) {
        return -1;
    }
    Schema schema = (Schema) entry;
    HsqlName tableName = table.getName();
    return schema.tableList.getIndex(tableName.name);
}
Returns index of a table or view in the HashMappedList that contains the table objects for this Database .
31,823
// After addition or removal of columns or indexes, recompiles every view,
// constraint and assertion that references the table (using the system
// session), then refreshes the constraint path on every table in the
// database so cached references stay valid.
void recompileDependentObjects ( Table table ) { OrderedHashSet set = getReferencingObjects ( table . getName ( ) ) ; Session session = database . sessionManager . getSysSession ( ) ; for ( int i = 0 ; i < set . size ( ) ; i ++ ) { HsqlName name = ( HsqlName ) set . get ( i ) ; switch ( name . type ) { case SchemaObject . VIEW : case SchemaObject . CONSTRAINT : case SchemaObject . ASSERTION : SchemaObject object = getSchemaObject ( name ) ; object . compile ( session ) ; break ; } } HsqlArrayList list = getAllTables ( ) ; for ( int i = 0 ; i < list . size ( ) ; i ++ ) { Table t = ( Table ) list . get ( i ) ; t . updateConstraintPath ( ) ; } }
After addition or removal of columns and indexes all views that reference the table should be recompiled .
31,824
/**
 * Returns the table that owns the index with the given name in the given
 * schema, or null when the schema or the index does not exist.
 *
 * Fix: the original dereferenced the schema without a null check, so an
 * unknown schema name caused a NullPointerException instead of the null
 * return used by the sibling lookups (e.g. findUserTable).
 */
Table findUserTableForIndex ( Session session , String name , String schemaName ) {
    Schema schema = (Schema) schemaMap.get(schemaName);
    if (schema == null) {
        return null;
    }
    HsqlName indexName = schema.indexLookup.getName(name);
    if (indexName == null) {
        return null;
    }
    // The index's parent HsqlName is the owning table's name.
    return findUserTable(session, indexName.parent.name, schemaName);
}
Returns the table that has an index with the given name and schema .
31,825
/**
 * Non-throwing variant of getSchemaHsqlName: a null name yields the default
 * schema's name, INFORMATION_SCHEMA is special-cased, and an unknown name
 * yields the supplied defaultName instead of an exception — avoiding
 * throw-then-catch during session setup.
 */
public HsqlName getSchemaHsqlNameNoThrow ( String name , HsqlName defaultName ) {
    if (name == null) {
        return defaultSchemaHsqlName;
    }
    if (SqlInvariants.INFORMATION_SCHEMA.equals(name)) {
        return SqlInvariants.INFORMATION_SCHEMA_HSQLNAME;
    }
    Object entry = schemaMap.get(name);
    return (entry == null) ? defaultName : ((Schema) entry).name;
}
If schemaName is null return the default schema name else return the HsqlName object for the schema . If schemaName does not exist return the defaultName provided . Not throwing the usual exception saves some throw - then - catch nonsense in the usual session setup .
31,826
// Deduplicates initiate task messages during replay. For an
// Iv2InitiateTaskMessage whose uniqueId is at or below the last seen id —
// excluding the @LoadSinglepartitionTable/@LoadMultipartitionTable system
// procs, which are exempted from dedupe — builds and returns an
// InitiateResponseMessage carrying an UNEXPECTED_FAILURE / IGNORED_TRANSACTION
// client response. Returns null when the message should be processed normally.
public InitiateResponseMessage dedupe ( long inUniqueId , TransactionInfoBaseMessage in ) { if ( in instanceof Iv2InitiateTaskMessage ) { final Iv2InitiateTaskMessage init = ( Iv2InitiateTaskMessage ) in ; final StoredProcedureInvocation invocation = init . getStoredProcedureInvocation ( ) ; final String procName = invocation . getProcName ( ) ; if ( ! ( procName . equalsIgnoreCase ( "@LoadSinglepartitionTable" ) || procName . equalsIgnoreCase ( "@LoadMultipartitionTable" ) ) && inUniqueId <= m_lastSeenUniqueId ) { final InitiateResponseMessage resp = new InitiateResponseMessage ( init ) ; resp . setResults ( new ClientResponseImpl ( ClientResponseImpl . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , ClientResponseImpl . IGNORED_TRANSACTION ) ) ; return resp ; } } return null ; }
Dedupe initiate task messages . Check if the initiate task message is seen before .
31,827
/**
 * Records the highest uniqueId seen for this partition, considering only
 * initiate task messages; all other message types are ignored.
 */
public void updateLastSeenUniqueId ( long inUniqueId , TransactionInfoBaseMessage in ) {
    if (!(in instanceof Iv2InitiateTaskMessage)) {
        return;
    }
    if (inUniqueId > m_lastSeenUniqueId) {
        m_lastSeenUniqueId = inUniqueId;
    }
}
Update the last seen uniqueId for this partition if it s an initiate task message .
31,828
// Returns the next correctly sequenced replay message, or null when draining
// or nothing is queued. If the head entry's queue is exhausted it is removed,
// the drain condition is re-checked, and the (possibly new) head entry is
// polled; the uniqueId of the polled message is recorded via
// updateLastPolledUniqueId. NOTE(review): only one empty head entry is removed
// per call — presumably entries are never inserted empty; confirm.
public VoltMessage poll ( ) { if ( m_mustDrain || m_replayEntries . isEmpty ( ) ) { return null ; } if ( m_replayEntries . firstEntry ( ) . getValue ( ) . isEmpty ( ) ) { m_replayEntries . pollFirstEntry ( ) ; } checkDrainCondition ( ) ; if ( m_mustDrain || m_replayEntries . isEmpty ( ) ) { return null ; } VoltMessage m = m_replayEntries . firstEntry ( ) . getValue ( ) . poll ( ) ; updateLastPolledUniqueId ( m_replayEntries . firstEntry ( ) . getKey ( ) , ( TransactionInfoBaseMessage ) m ) ; return m ; }
Return the next correctly sequenced message or null if none exists .
31,829
// Offers a message into the replay sequencer. Returns false when the caller
// may run the message immediately (a stale FragmentTaskMessage, any
// CompleteTransactionMessage, or an invocation with no open sentinel entry to
// attach to); returns true when the message was consumed here. Behavior by
// type: Iv2EndOfLogMessage sets m_mpiEOLReached; a MultiPartitionParticipant
// sentinel creates or completes the ReplayEntry keyed by its uniqueId (stale
// sentinels are dropped, returning true); a first FragmentTaskMessage likewise
// creates or completes its entry, later fragments are queued on it; any other
// message is deduped, recorded via updateLastSeenUniqueId, and queued behind
// the newest sentinel-bearing entry when one exists.
public boolean offer ( long inUniqueId , TransactionInfoBaseMessage in ) { ReplayEntry found = m_replayEntries . get ( inUniqueId ) ; if ( in instanceof Iv2EndOfLogMessage ) { m_mpiEOLReached = true ; return true ; } if ( in instanceof MultiPartitionParticipantMessage ) { if ( inUniqueId <= m_lastPolledFragmentUniqueId ) { return true ; } if ( found == null ) { ReplayEntry newEntry = new ReplayEntry ( ) ; newEntry . m_sentinelUniqueId = inUniqueId ; m_replayEntries . put ( inUniqueId , newEntry ) ; } else { found . m_sentinelUniqueId = inUniqueId ; assert ( found . isReady ( ) ) ; } } else if ( in instanceof FragmentTaskMessage ) { if ( inUniqueId <= m_lastPolledFragmentUniqueId ) { return false ; } FragmentTaskMessage ftm = ( FragmentTaskMessage ) in ; if ( found == null ) { ReplayEntry newEntry = new ReplayEntry ( ) ; newEntry . m_firstFragment = ftm ; m_replayEntries . put ( inUniqueId , newEntry ) ; } else if ( found . m_firstFragment == null ) { found . m_firstFragment = ftm ; assert ( found . isReady ( ) ) ; } else { found . addQueuedMessage ( ftm ) ; } } else if ( in instanceof CompleteTransactionMessage ) { return false ; } else { if ( dedupe ( inUniqueId , in ) != null ) { return true ; } updateLastSeenUniqueId ( inUniqueId , in ) ; if ( m_replayEntries . isEmpty ( ) || ! m_replayEntries . lastEntry ( ) . getValue ( ) . hasSentinel ( ) ) { return false ; } else { m_replayEntries . lastEntry ( ) . getValue ( ) . addQueuedMessage ( in ) ; } } return true ; }
Offer a new message . Return false if the offered message can be run immediately .
31,830
// Ensures the network buffer can hold size payload bytes plus the 4-byte
// length prefix. On growth: discards the old direct buffer back to the pool,
// allocates a new one, positions it past the length slot, and re-slices
// m_data so it covers only the payload region.
private void verifyDataCapacity ( int size ) { if ( size + 4 > m_dataNetwork . capacity ( ) ) { m_dataNetworkOrigin . discard ( ) ; m_dataNetworkOrigin = org . voltcore . utils . DBBPool . allocateDirect ( size + 4 ) ; m_dataNetwork = m_dataNetworkOrigin . b ( ) ; m_dataNetwork . position ( 4 ) ; m_data = m_dataNetwork . slice ( ) ; } }
Ensures the data buffer has capacity for the given size plus the 4-byte length prefix, reallocating it if necessary.
31,831
// Sends the Initialize command to the IPC execution engine. The abstract API
// assumes construction initializes; here initialization is just another
// command written over the connection, followed by a status-byte read,
// checkErrorCode, and a hashinator update. IOExceptions are wrapped as
// RuntimeException.
// NOTE(review): the hostname length is written as putInt((short)
// hostname.length()) — the short cast is a no-op after int promotion, and the
// value is the UTF-16 char count, not the byte count of the UTF-8 bytes that
// follow; these differ for non-ASCII hostnames. Confirm against the EE wire
// format.
public void initialize ( final int clusterIndex , final long siteId , final int partitionId , final int sitesPerHost , final int hostId , final String hostname , final int drClusterId , final int defaultDrBufferSize , final long tempTableMemory , final HashinatorConfig hashinatorConfig , final boolean createDrReplicatedStream , final long exportFlushTimeout ) { synchronized ( printLockObject ) { System . out . println ( "Initializing an IPC EE " + this + " for hostId " + hostId + " siteId " + siteId + " from thread " + Thread . currentThread ( ) . getId ( ) ) ; } int result = ExecutionEngine . ERRORCODE_ERROR ; m_data . clear ( ) ; m_data . putInt ( Commands . Initialize . m_id ) ; m_data . putInt ( clusterIndex ) ; m_data . putLong ( siteId ) ; m_data . putInt ( partitionId ) ; m_data . putInt ( sitesPerHost ) ; m_data . putInt ( hostId ) ; m_data . putInt ( drClusterId ) ; m_data . putInt ( defaultDrBufferSize ) ; m_data . putLong ( EELoggers . getLogLevels ( ) ) ; m_data . putLong ( tempTableMemory ) ; m_data . putInt ( createDrReplicatedStream ? 1 : 0 ) ; m_data . putInt ( ( short ) hostname . length ( ) ) ; m_data . put ( hostname . getBytes ( Charsets . UTF_8 ) ) ; try { m_data . flip ( ) ; m_connection . write ( ) ; result = m_connection . readStatusByte ( ) ; } catch ( final IOException e ) { System . out . println ( "Exception: " + e . getMessage ( ) ) ; throw new RuntimeException ( e ) ; } checkErrorCode ( result ) ; updateHashinator ( hashinatorConfig ) ; }
the abstract api assumes construction initializes but here initialization is just another command .
31,832
// Sends the LoadCatalog command: timestamp, the raw catalog bytes, and a
// trailing NUL terminator. The buffer is grown first (+100 covers the command
// id, timestamp and terminator). IOExceptions are wrapped as RuntimeException;
// the EE status byte is validated via checkErrorCode.
protected void coreLoadCatalog ( final long timestamp , final byte [ ] catalogBytes ) throws EEException { int result = ExecutionEngine . ERRORCODE_ERROR ; verifyDataCapacity ( catalogBytes . length + 100 ) ; m_data . clear ( ) ; m_data . putInt ( Commands . LoadCatalog . m_id ) ; m_data . putLong ( timestamp ) ; m_data . put ( catalogBytes ) ; m_data . put ( ( byte ) '\0' ) ; try { m_data . flip ( ) ; m_connection . write ( ) ; result = m_connection . readStatusByte ( ) ; } catch ( final IOException e ) { System . out . println ( "Exception: " + e . getMessage ( ) ) ; throw new RuntimeException ( e ) ; } checkErrorCode ( result ) ; }
write the catalog as a UTF - 8 byte string via connection
31,833
/**
 * Sends the UpdateCatalog command with the catalog diffs as a NUL-terminated
 * UTF-8 string, then reads and validates the EE status byte.
 *
 * Fix: the original caught UnsupportedEncodingException, logged it, and then
 * FELL THROUGH to flip and write a cleared, unfilled buffer to the EE. UTF-8
 * support is mandated by the platform so the exception cannot actually occur,
 * but if it ever did the method now fails fast instead of sending a garbage
 * command.
 */
public void coreUpdateCatalog ( final long timestamp , final boolean isStreamUpdate , final String catalogDiffs ) throws EEException {
    int result = ExecutionEngine.ERRORCODE_ERROR;
    final byte[] catalogBytes;
    try {
        catalogBytes = catalogDiffs.getBytes("UTF-8");
    } catch (final UnsupportedEncodingException ex) {
        // Cannot happen (UTF-8 is always supported); never proceed with an
        // empty buffer as the old code did.
        Logger.getLogger(ExecutionEngineIPC.class.getName()).log(Level.SEVERE, null, ex);
        throw new RuntimeException(ex);
    }
    // +100 covers the command id, timestamp, flag and NUL terminator.
    verifyDataCapacity(catalogBytes.length + 100);
    m_data.clear();
    m_data.putInt(Commands.UpdateCatalog.m_id);
    m_data.putLong(timestamp);
    m_data.putInt(isStreamUpdate ? 1 : 0);
    m_data.put(catalogBytes);
    m_data.put((byte) '\0');
    try {
        m_data.flip();
        m_connection.write();
        result = m_connection.readStatusByte();
    } catch (final IOException e) {
        System.out.println("Exception: " + e.getMessage());
        throw new RuntimeException(e);
    }
    checkErrorCode(result);
}
write the diffs as a UTF - 8 byte string via connection
31,834
// Retrieves the next dependency table and sends it over the connection:
// either a single kErrorCode_DependencyNotFound byte, or
// kErrorCode_DependencyFound followed by a 4-byte length and the serialized
// table. Throws IOException when the blocking channel write comes up short.
// NOTE(review): the original description mentions "two lengths", but only one
// length (the table's byte length) is written here — presumably the network
// layer adds its own; confirm.
private void sendDependencyTable ( final int dependencyId ) throws IOException { final byte [ ] dependencyBytes = nextDependencyAsBytes ( dependencyId ) ; if ( dependencyBytes == null ) { m_connection . m_socket . getOutputStream ( ) . write ( Connection . kErrorCode_DependencyNotFound ) ; return ; } final ByteBuffer message = ByteBuffer . allocate ( 1 + 4 + dependencyBytes . length ) ; message . put ( ( byte ) Connection . kErrorCode_DependencyFound ) ; message . putInt ( dependencyBytes . length ) ; message . put ( dependencyBytes ) ; message . rewind ( ) ; if ( m_connection . m_socketChannel . write ( message ) != message . capacity ( ) ) { throw new IOException ( "Unable to send dependency table to client. Attempted blocking write of " + message . capacity ( ) + " but not all of it was written" ) ; } }
Retrieve a dependency table and send it via the connection . If no table is available send a response code indicating such . The message is prepended with two lengths . One length is for the network layer and is the size of the whole message not including the length prefix .
31,835
/**
 * Re-enables the scoreboard once all sites are fully initialized and ready
 * for snapshot, rendezvousing on the shared barrier with a 3 minute timeout.
 * On failure the barrier is reset and false is returned.
 *
 * Fix: the original swallowed InterruptedException without restoring the
 * thread's interrupt status; it is now re-asserted so callers and pool
 * machinery can observe the interruption.
 */
boolean enableScoreboard ( ) {
    assert (s_barrier != null);
    try {
        s_barrier.await(3L, TimeUnit.MINUTES);
    } catch (InterruptedException | BrokenBarrierException | TimeoutException e) {
        if (e instanceof InterruptedException) {
            // Preserve the interrupt for the caller.
            Thread.currentThread().interrupt();
        }
        hostLog.error("Cannot re-enable the scoreboard.");
        s_barrier.reset();
        return false;
    }
    m_scoreboardEnabled = true;
    if (hostLog.isDebugEnabled()) {
        hostLog.debug("Scoreboard has been enabled.");
    }
    return true;
}
After all sites have been fully initialized and are ready for snapshot, we should enable the scoreboard.
31,836
// Queues or dispatches a transaction task. Many network threads race to reach
// here; the method is synchronized to serialize queue order. With a non-empty
// backlog: single-partition tasks always wait behind it; other tasks are
// backlogged when sequenced after the head (read-only head+task pairs compare
// sequence inequality, mixed pairs compare strictly greater), else dispatched
// to the coordinated queue (when coordination is needed and the scoreboard is
// enabled) or the plain task queue. With an empty backlog: multi-partition
// tasks are recorded in the backlog AND dispatched immediately.
synchronized void offer ( TransactionTask task ) { Iv2Trace . logTransactionTaskQueueOffer ( task ) ; TransactionState txnState = task . getTransactionState ( ) ; if ( ! m_backlog . isEmpty ( ) ) { if ( txnState . isSinglePartition ( ) ) { m_backlog . addLast ( task ) ; return ; } TransactionTask headTask = m_backlog . getFirst ( ) ; if ( txnState . isReadOnly ( ) && headTask . getTransactionState ( ) . isReadOnly ( ) ? TxnEgo . getSequence ( task . getTxnId ( ) ) != TxnEgo . getSequence ( headTask . getTxnId ( ) ) : TxnEgo . getSequence ( task . getTxnId ( ) ) > TxnEgo . getSequence ( headTask . getTxnId ( ) ) ) { m_backlog . addLast ( task ) ; } else if ( task . needCoordination ( ) && m_scoreboardEnabled ) { coordinatedTaskQueueOffer ( task ) ; } else { taskQueueOffer ( task ) ; } } else { if ( ! txnState . isSinglePartition ( ) ) { m_backlog . addLast ( task ) ; } if ( task . needCoordination ( ) && m_scoreboardEnabled ) { coordinatedTaskQueueOffer ( task ) ; } else { taskQueueOffer ( task ) ; } } }
If necessary, stick this task in the backlog. Many network threads may be racing to reach here; the method is synchronized to serialize queue order.
31,837
// Offers as many runnable backlog tasks to the site task queue as possible
// after the head transaction completes. No-op (returns 0) unless the backlog
// head is done AND matches txnId. Removes the finished head, then walks the
// backlog: each task is offered (coordinated or plain queue); single-partition
// tasks are removed and the walk continues; on reaching a multi-partition
// task, its remaining queued fragments (same txnId) are removed and offered,
// then the walk stops. Returns the number of tasks offered.
synchronized int flush ( long txnId ) { if ( tmLog . isDebugEnabled ( ) ) { tmLog . debug ( "Flush backlog with txnId:" + TxnEgo . txnIdToString ( txnId ) + ", backlog head txnId is:" + ( m_backlog . isEmpty ( ) ? "empty" : TxnEgo . txnIdToString ( m_backlog . getFirst ( ) . getTxnId ( ) ) ) ) ; } int offered = 0 ; if ( m_backlog . isEmpty ( ) || ! m_backlog . getFirst ( ) . getTransactionState ( ) . isDone ( ) ) { return offered ; } if ( m_backlog . getFirst ( ) . getTxnId ( ) != txnId ) { return offered ; } m_backlog . removeFirst ( ) ; Iterator < TransactionTask > iter = m_backlog . iterator ( ) ; while ( iter . hasNext ( ) ) { TransactionTask task = iter . next ( ) ; long lastQueuedTxnId = task . getTxnId ( ) ; if ( task . needCoordination ( ) && m_scoreboardEnabled ) { coordinatedTaskQueueOffer ( task ) ; } else { taskQueueOffer ( task ) ; } ++ offered ; if ( task . getTransactionState ( ) . isSinglePartition ( ) ) { iter . remove ( ) ; continue ; } else { while ( iter . hasNext ( ) ) { task = iter . next ( ) ; if ( task . getTxnId ( ) == lastQueuedTxnId ) { iter . remove ( ) ; if ( task . needCoordination ( ) && m_scoreboardEnabled ) { coordinatedTaskQueueOffer ( task ) ; } else { taskQueueOffer ( task ) ; } ++ offered ; } } break ; } } return offered ; }
Try to offer as many runnable Tasks to the SiteTaskerQueue as possible .
31,838
// Called from streaming snapshot execution: returns the single-partition
// tasks queued behind the head multi-partition transaction, skipping entries
// that belong to the MP transaction itself (same txnId).
// NOTE(review): assumes the backlog is non-empty and headed by an MP task —
// iter.next() is called without hasNext() and the asserts encode the same
// precondition; confirm callers guarantee this.
public synchronized List < TransactionTask > getBacklogTasks ( ) { List < TransactionTask > pendingTasks = new ArrayList < > ( ) ; Iterator < TransactionTask > iter = m_backlog . iterator ( ) ; TransactionTask mpTask = iter . next ( ) ; assert ( ! mpTask . getTransactionState ( ) . isSinglePartition ( ) ) ; while ( iter . hasNext ( ) ) { TransactionTask task = iter . next ( ) ; if ( task . getTxnId ( ) == mpTask . getTxnId ( ) ) { continue ; } assert ( task . getTransactionState ( ) . isSinglePartition ( ) ) ; pendingTasks . add ( task ) ; } return pendingTasks ; }
Called from streaming snapshot execution
31,839
/**
 * Flushes multi-partition read-only transactions out of the backlog: marks
 * each read-only head transaction done and flushes it, repeating until the
 * head is no longer read-only or the backlog empties.
 */
public synchronized void removeMPReadTransactions ( ) {
    for (TransactionTask head = m_backlog.peekFirst();
            head != null && head.getTransactionState().isReadOnly();
            head = m_backlog.peekFirst()) {
        head.getTransactionState().setDone();
        flush(head.getTxnId());
    }
}
flush mp readonly transactions out of backlog
31,840
// Converts the internal XYZPoint loops back into lat/lng rings with the same
// vertex order the polygon was constructed from. The first loop (the shell)
// is emitted in stored order; subsequent loops (holes) are walked backward to
// undo the orientation flip applied at construction. Every emitted ring both
// starts and ends with its first vertex (closed ring).
public List < List < GeographyPointValue > > getRings ( ) { List < List < GeographyPointValue > > llLoops = new ArrayList < > ( ) ; boolean isShell = true ; for ( List < XYZPoint > xyzLoop : m_loops ) { List < GeographyPointValue > llLoop = new ArrayList < > ( ) ; llLoop . add ( xyzLoop . get ( 0 ) . toGeographyPointValue ( ) ) ; int startIdx = ( isShell ? 1 : xyzLoop . size ( ) - 1 ) ; int endIdx = ( isShell ? xyzLoop . size ( ) : 0 ) ; int delta = ( isShell ? 1 : - 1 ) ; for ( int idx = startIdx ; idx != endIdx ; idx += delta ) { XYZPoint xyz = xyzLoop . get ( idx ) ; llLoop . add ( xyz . toGeographyPointValue ( ) ) ; } llLoop . add ( xyzLoop . get ( 0 ) . toGeographyPointValue ( ) ) ; llLoops . add ( llLoop ) ; isShell = false ; } return llLoops ; }
Return the list of rings of a polygon . The list has the same values as the list of rings used to construct the polygon or the sequence of WKT rings used to construct the polygon .
31,841
/**
 * Returns this polygon as well-known text, e.g.
 * "POLYGON ((lng lat, ...), (...))". The first loop (shell) is emitted in
 * stored order; holes are walked backward to restore the orientation the
 * caller supplied, and every ring is closed by repeating its first vertex.
 *
 * Improvement: uses StringBuilder instead of the legacy synchronized
 * StringBuffer — the accumulator is purely method-local, so no
 * synchronization is needed.
 */
public String toWKT ( ) {
    StringBuilder sb = new StringBuilder();
    sb.append("POLYGON (");
    boolean isFirstLoop = true;
    for (List<XYZPoint> loop : m_loops) {
        if (!isFirstLoop) {
            sb.append(", ");
        }
        sb.append("(");
        // Shell walks forward from index 1; holes walk backward from the end.
        int startIdx = (isFirstLoop ? 1 : loop.size() - 1);
        int endIdx = (isFirstLoop ? loop.size() : 0);
        int increment = (isFirstLoop ? 1 : -1);
        sb.append(loop.get(0).toGeographyPointValue().formatLngLat()).append(", ");
        for (int idx = startIdx; idx != endIdx; idx += increment) {
            XYZPoint xyz = loop.get(idx);
            sb.append(xyz.toGeographyPointValue().formatLngLat());
            sb.append(", ");
        }
        // Close the ring on its first vertex.
        sb.append(loop.get(0).toGeographyPointValue().formatLngLat());
        sb.append(")");
        isFirstLoop = false;
    }
    sb.append(")");
    return sb.toString();
}
Return a representation of this object as well - known text .
31,842
/**
 * Returns the serialized size of this polygon in bytes: the fixed polygon
 * overhead plus the per-ring cost of each loop. Excludes the 4-byte length
 * prefix that precedes variable-length types.
 */
public int getLengthInBytes ( ) {
    long total = polygonOverheadInBytes();
    for (List<XYZPoint> ring : m_loops) {
        total += loopLengthInBytes(ring.size());
    }
    return (int) total;
}
Return the number of bytes in the serialization for this polygon . Returned value does not include the 4 - byte length prefix that precedes variable - length types .
31,843
/**
 * Validates one polygon ring: it must be non-null, contain at least four
 * points (the closing vertex repeats the first), and begin and end with equal
 * points. Throws IllegalArgumentException, prefixed with excpMsgPrf, when any
 * condition fails.
 */
private static < T > void diagnoseLoop ( List < T > loop , String excpMsgPrf ) throws IllegalArgumentException {
    if (loop == null) {
        throw new IllegalArgumentException(excpMsgPrf + "a polygon must contain at least one ring " + "(with each ring at least 4 points, including repeated closing vertex)");
    }
    if (loop.size() < 4) {
        throw new IllegalArgumentException(excpMsgPrf + "a polygon ring must contain at least 4 points " + "(including repeated closing vertex)");
    }
    T first = loop.get(0);
    T last = loop.get(loop.size() - 1);
    if (!first.equals(last)) {
        throw new IllegalArgumentException(excpMsgPrf + "closing points of ring are not equal: \"" + first.toString() + "\" != \"" + last.toString() + "\"");
    }
}
A helper function to validate the loop structure If loop is invalid it generates IllegalArgumentException exception
31,844
// Returns a new GeographyValue translated by the given point offset. Each
// stored vertex is converted to a GeographyPointValue and offset; latitude
// and longitude stay in range because GeographyPointValue.add normalizes.
// The first vertex is appended again to close each ring, implying the
// internal loops are stored without the closing vertex. NOTE(review): the
// boolean 'true' passed to the constructor presumably marks the rings as
// already closed/oriented — confirm against the constructor.
public GeographyValue add ( GeographyPointValue offset ) { List < List < GeographyPointValue > > newLoops = new ArrayList < > ( ) ; for ( List < XYZPoint > oneLoop : m_loops ) { List < GeographyPointValue > loop = new ArrayList < > ( ) ; for ( XYZPoint p : oneLoop ) { loop . add ( p . toGeographyPointValue ( ) . add ( offset ) ) ; } loop . add ( oneLoop . get ( 0 ) . toGeographyPointValue ( ) . add ( offset ) ) ; newLoops . add ( loop ) ; } return new GeographyValue ( newLoops , true ) ; }
Create a new GeographyValue which is offset from this one by the given point . The latitude and longitude values stay in range because we are using the normalizing operations in GeographyPointValue .
31,845
// Called internally or externally at write-delay intervals to flush and fsync
// the log file. Returns immediately when closed; under the fileStreamOut lock,
// does nothing unless needsSync is set. If a writer is mid-write
// (busyWriting), sets forceSync and defers the sync to that writer. I/O
// errors are printed and swallowed (best-effort); on success syncCount is
// bumped and needsSync/forceSync are cleared.
public void sync ( ) { if ( isClosed ) { return ; } synchronized ( fileStreamOut ) { if ( needsSync ) { if ( busyWriting ) { forceSync = true ; return ; } try { fileStreamOut . flush ( ) ; outDescriptor . sync ( ) ; syncCount ++ ; } catch ( IOException e ) { Error . printSystemOut ( "flush() or sync() error: " + e . toString ( ) ) ; } needsSync = false ; forceSync = false ; } } }
Called internally or externally in write delay intervals .
31,846
// Opens the output file, keeping the FileAccess sync descriptor for later
// fsync and wrapping the stream in an 8K (2 << 12) BufferedOutputStream.
// Dumps use the default file access, normal operation uses the database's.
// IOException is wrapped as FILE_IO_ERROR carrying the file name.
// NOTE(review): in current usage the file never pre-exists; whether
// openOutputStreamElement appends or truncates is not visible here — confirm.
protected void openFile ( ) { try { FileAccess fa = isDump ? FileUtil . getDefaultInstance ( ) : database . getFileAccess ( ) ; OutputStream fos = fa . openOutputStreamElement ( outFile ) ; outDescriptor = fa . getFileSync ( fos ) ; fileStreamOut = new BufferedOutputStream ( fos , 2 << 12 ) ; } catch ( IOException e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_Message_Pair , new Object [ ] { e . toString ( ) , outFile } ) ; } }
File is opened in append mode although in current usage the file never pre - exists
31,847
/**
 * Builds a Runnable that logs one message on the logger thread in an
 * exception-safe way, temporarily adopting the calling thread's name so the
 * log line is attributed to the caller; the logger thread's name is always
 * restored.
 *
 * Fix: the catch parameter was also named 't', shadowing the Throwable being
 * logged; renamed so the error report unambiguously refers to the failure
 * thrown by the logging call itself (behavior is unchanged — the caught
 * throwable was already the one printed).
 */
private Runnable createRunnableLoggingTask ( final Level level , final Object message , final Throwable t ) {
    final String callerThreadName = Thread.currentThread().getName();
    final Runnable runnableLoggingTask = new Runnable() {
        public void run() {
            Thread loggerThread = Thread.currentThread();
            loggerThread.setName(callerThreadName);
            try {
                m_logger.log(level, message, t);
            } catch (Throwable loggingFailure) {
                System.err.println("Exception thrown in logging thread for " + callerThreadName + ":" + loggingFailure);
            } finally {
                loggerThread.setName(ASYNCH_LOGGER_THREAD_NAME);
            }
        }
    };
    return runnableLoggingTask;
}
Generate a runnable task that logs one message in an exception - safe way .
31,848
/**
 * Builds a Runnable that logs one localized (l7d) message on the logger
 * thread in an exception-safe way, temporarily adopting the calling thread's
 * name for attribution; the logger thread's name is always restored.
 *
 * Fix: the catch parameter was also named 't', shadowing the Throwable being
 * logged; renamed for clarity (behavior unchanged — the caught throwable was
 * already the one printed).
 */
private Runnable createRunnableL7dLoggingTask ( final Level level , final String key , final Object [ ] params , final Throwable t ) {
    final String callerThreadName = Thread.currentThread().getName();
    final Runnable runnableLoggingTask = new Runnable() {
        public void run() {
            Thread loggerThread = Thread.currentThread();
            loggerThread.setName(callerThreadName);
            try {
                m_logger.l7dlog(level, key, params, t);
            } catch (Throwable loggingFailure) {
                System.err.println("Exception thrown in logging thread for " + callerThreadName + ":" + loggingFailure);
            } finally {
                loggerThread.setName(ASYNCH_LOGGER_THREAD_NAME);
            }
        }
    };
    return runnableLoggingTask;
}
Generate a runnable task that logs one localized message in an exception - safe way .
31,849
/**
 * Globally reconfigures Log4j by reflectively invoking
 * VoltLog4jLogger.configure(String, File). This is deliberately best-effort:
 * when Log4j (or the shim class) is not on the classpath, or reflection
 * fails, the call is silently a no-op.
 *
 * Fix: named the swallowed exception 'ignored' and documented why swallowing
 * is intentional, per empty-catch convention.
 */
public static void configure ( String xmlConfig , File voltroot ) {
    try {
        Class<?> loggerClz = Class.forName("org.voltcore.logging.VoltLog4jLogger");
        assert (loggerClz != null);
        Method configureMethod = loggerClz.getMethod("configure", String.class, File.class);
        configureMethod.invoke(null, xmlConfig, voltroot);
    } catch (Exception ignored) {
        // Deliberately swallowed: logging reconfiguration must never take the
        // process down, and running without Log4j is a supported deployment.
    }
}
Static method to change the Log4j config globally . This fails if you re not using Log4j for now .
31,850
/**
 * Looks up an item by name. Lookup is case-insensitive because keys are
 * stored upper-cased; returns null when the backing map has not been created
 * or the name is absent.
 */
public T get ( String name ) {
    return (m_items == null) ? null : m_items.get(name.toUpperCase());
}
Get an item from the map by name
31,851
// Returns an iterator over the items in (sorted) key order. The backing
// TreeMap is created lazily here so iterating an untouched container safely
// yields an empty iterator.
public Iterator < T > iterator ( ) { if ( m_items == null ) { m_items = new TreeMap < String , T > ( ) ; } return m_items . values ( ) . iterator ( ) ; }
Get an iterator for the items in the map
31,852
private static void validateMigrateStmt ( String sql , VoltXMLElement xmlSQL , Database db ) { final Map < String , String > attributes = xmlSQL . attributes ; assert attributes . size ( ) == 1 ; final Table targetTable = db . getTables ( ) . get ( attributes . get ( "table" ) ) ; assert targetTable != null ; final CatalogMap < TimeToLive > ttls = targetTable . getTimetolive ( ) ; if ( ttls . isEmpty ( ) ) { throw new PlanningErrorException ( String . format ( "%s: Cannot migrate from table %s because it does not have a TTL column" , sql , targetTable . getTypeName ( ) ) ) ; } else { final Column ttl = ttls . iterator ( ) . next ( ) . getTtlcolumn ( ) ; final TupleValueExpression columnExpression = new TupleValueExpression ( targetTable . getTypeName ( ) , ttl . getName ( ) , ttl . getIndex ( ) ) ; if ( ! ExpressionUtil . collectTerminals ( ExpressionUtil . from ( db , VoltXMLElementHelper . getFirstChild ( VoltXMLElementHelper . getFirstChild ( xmlSQL , "condition" ) , "operation" ) ) ) . contains ( columnExpression ) ) { throw new PlanningErrorException ( String . format ( "%s: Cannot migrate from table %s because the WHERE caluse does not contain TTL column %s" , sql , targetTable . getTypeName ( ) , ttl . getName ( ) ) ) ; } } }
Check that MIGRATE FROM tbl WHERE ... statement is valid .
31,853
public String parameterize ( ) { Set < Integer > paramIds = new HashSet < > ( ) ; ParameterizationInfo . findUserParametersRecursively ( m_xmlSQL , paramIds ) ; m_adhocUserParamsCount = paramIds . size ( ) ; m_paramzInfo = null ; if ( paramIds . size ( ) == 0 ) { m_paramzInfo = ParameterizationInfo . parameterize ( m_xmlSQL ) ; } if ( m_paramzInfo != null ) { m_planSelector . outputParameterizedCompiledStatement ( m_paramzInfo . getParameterizedXmlSQL ( ) ) ; return m_paramzInfo . getParameterizedXmlSQL ( ) . toMinString ( ) ; } return m_xmlSQL . toMinString ( ) ; }
Auto - parameterize all of the literals in the parsed SQL statement .
31,854
public CompiledPlan plan ( ) throws PlanningErrorException { m_recentErrorMsg = null ; if ( m_paramzInfo != null ) { try { CompiledPlan plan = compileFromXML ( m_paramzInfo . getParameterizedXmlSQL ( ) , m_paramzInfo . getParamLiteralValues ( ) ) ; if ( plan != null ) { if ( plan . extractParamValues ( m_paramzInfo ) ) { return plan ; } } else if ( DEBUGGING_STATIC_MODE_TO_RETRY_ON_ERROR ) { compileFromXML ( m_paramzInfo . getParameterizedXmlSQL ( ) , m_paramzInfo . getParamLiteralValues ( ) ) ; } } catch ( Exception | StackOverflowError e ) { m_hasExceptionWhenParameterized = true ; m_recentErrorMsg = null ; m_partitioning . resetAnalysisState ( ) ; } } CompiledPlan plan = compileFromXML ( m_xmlSQL , null ) ; if ( plan == null ) { if ( DEBUGGING_STATIC_MODE_TO_RETRY_ON_ERROR ) { plan = compileFromXML ( m_xmlSQL , null ) ; } throw new PlanningErrorException ( m_recentErrorMsg ) ; } return plan ; }
Get the best plan for the SQL statement given assuming the given costModel .
31,855
private void harmonizeCommonTableSchemas ( CompiledPlan plan ) { List < AbstractPlanNode > seqScanNodes = plan . rootPlanGraph . findAllNodesOfClass ( SeqScanPlanNode . class ) ; for ( AbstractPlanNode planNode : seqScanNodes ) { SeqScanPlanNode seqScanNode = ( SeqScanPlanNode ) planNode ; StmtCommonTableScan scan = seqScanNode . getCommonTableScan ( ) ; if ( scan != null ) { scan . harmonizeOutputSchema ( ) ; } } }
Make sure that schemas in base and recursive plans in common table scans have identical schemas . This is important because otherwise we will get data corruption in the EE . We look for SeqScanPlanNodes then look for a common table scan and ask the scan node to harmonize its schemas .
31,856
public void resetCapacity ( int newCapacity , int newPolicy ) throws IllegalArgumentException { if ( newCapacity != 0 && hashIndex . elementCount > newCapacity ) { int surplus = hashIndex . elementCount - newCapacity ; surplus += ( surplus >> 5 ) ; if ( surplus > hashIndex . elementCount ) { surplus = hashIndex . elementCount ; } clear ( surplus , ( surplus >> 6 ) ) ; } if ( newCapacity != 0 && newCapacity < threshold ) { rehash ( newCapacity ) ; if ( newCapacity < hashIndex . elementCount ) { newCapacity = maxCapacity ; } } this . maxCapacity = newCapacity ; this . purgePolicy = newPolicy ; }
In rare circumstances resetCapacity may not succeed in which case capacity remains unchanged but purge policy is set to newPolicy
31,857
protected void initParams ( Database database , String baseFileName ) { fileName = baseFileName + ".data.tmp" ; this . database = database ; fa = FileUtil . getDefaultInstance ( ) ; int cacheSizeScale = 10 ; cacheFileScale = 8 ; Error . printSystemOut ( "cache_size_scale: " + cacheSizeScale ) ; maxCacheSize = 2048 ; int avgRowBytes = 1 << cacheSizeScale ; maxCacheBytes = maxCacheSize * avgRowBytes ; maxDataFileSize = ( long ) Integer . MAX_VALUE * 4 ; dataFile = null ; }
Initial external parameters are set here . The size is fixed .
31,858
public synchronized void close ( boolean write ) { try { if ( dataFile != null ) { dataFile . close ( ) ; dataFile = null ; fa . removeElement ( fileName ) ; } } catch ( Throwable e ) { database . logger . appLog . logContext ( e , null ) ; throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_DataFileCache_close , new Object [ ] { e , fileName } ) ; } }
Parameter write is always false . The backing file is simply closed and deleted .
31,859
private AbstractPlanNode recursivelyApply ( AbstractPlanNode plan , int childIdx ) { if ( plan instanceof InsertPlanNode ) { InsertPlanNode insertNode = ( InsertPlanNode ) plan ; assert ( insertNode . getChildCount ( ) == 1 ) ; AbstractPlanNode abstractChild = insertNode . getChild ( 0 ) ; ScanPlanNodeWhichCanHaveInlineInsert targetNode = ( abstractChild instanceof ScanPlanNodeWhichCanHaveInlineInsert ) ? ( ( ScanPlanNodeWhichCanHaveInlineInsert ) abstractChild ) : null ; if ( targetNode != null && ! insertNode . isUpsert ( ) && ! targetNode . hasInlineAggregateNode ( ) && ! targetNode . getTargetTableName ( ) . equalsIgnoreCase ( insertNode . getTargetTableName ( ) ) ) { AbstractPlanNode parent = ( insertNode . getParentCount ( ) > 0 ) ? insertNode . getParent ( 0 ) : null ; AbstractPlanNode abstractTargetNode = targetNode . getAbstractNode ( ) ; abstractTargetNode . addInlinePlanNode ( insertNode ) ; insertNode . clearChildren ( ) ; insertNode . clearParents ( ) ; abstractTargetNode . clearParents ( ) ; if ( parent != null ) { parent . setAndLinkChild ( childIdx , abstractTargetNode ) ; } plan = abstractTargetNode ; } return plan ; } for ( int idx = 0 ; idx < plan . getChildCount ( ) ; idx += 1 ) { AbstractPlanNode child = plan . getChild ( idx ) ; recursivelyApply ( child , idx ) ; } return plan ; }
This helper function is called when we recurse down the childIdx - th child of a parent node .
31,860
static void updateTableNames ( List < ParsedColInfo > src , String tblName ) { src . forEach ( ci -> ci . updateTableName ( tblName , tblName ) . toTVE ( ci . m_index , ci . m_index ) ) ; }
Updates the table names .
31,861
ParsedSelectStmt rewriteAsMV ( Table view ) { m_groupByColumns . clear ( ) ; m_distinctGroupByColumns = null ; m_groupByExpressions . clear ( ) ; m_distinctProjectSchema = null ; m_distinct = m_hasAggregateExpression = m_hasComplexGroupby = m_hasComplexAgg = false ; setParamsByIndex ( new TreeMap < > ( ) ) ; m_paramsById . clear ( ) ; m_paramValues = null ; m_tableList . clear ( ) ; m_tableList . add ( view ) ; m_tableAliasMap . clear ( ) ; m_tableAliasListAsJoinOrder . clear ( ) ; m_tableAliasListAsJoinOrder . add ( view . getTypeName ( ) ) ; m_joinTree = new TableLeafNode ( 0 , null , null , generateStmtTableScan ( view ) ) ; prepareMVBasedQueryFix ( ) ; return this ; }
Updates miscellaneous fields as part of rewriting as materialized view .
31,862
public StmtTargetTableScan generateStmtTableScan ( Table view ) { StmtTargetTableScan st = new StmtTargetTableScan ( view ) ; m_displayColumns . forEach ( ci -> st . resolveTVE ( ( TupleValueExpression ) ( ci . m_expression ) ) ) ; defineTableScanByAlias ( view . getTypeName ( ) , st ) ; return st ; }
Generate table scan and add the scan to m_tableAliasMap
31,863
public void switchOptimalSuiteForAvgPushdown ( ) { m_displayColumns = m_avgPushdownDisplayColumns ; m_aggResultColumns = m_avgPushdownAggResultColumns ; m_groupByColumns = m_avgPushdownGroupByColumns ; m_distinctGroupByColumns = m_avgPushdownDistinctGroupByColumns ; m_orderColumns = m_avgPushdownOrderColumns ; m_projectSchema = m_avgPushdownProjectSchema ; m_distinctProjectSchema = m_avgPushdownFinalProjectSchema ; m_hasComplexAgg = true ; m_having = m_avgPushdownHaving ; }
Switch the optimal set for pushing down AVG
31,864
private void prepareMVBasedQueryFix ( ) { if ( m_hasComplexGroupby ) { m_mvFixInfo . setEdgeCaseQueryNoFixNeeded ( false ) ; } for ( StmtTableScan mvTableScan : allScans ( ) ) { Set < SchemaColumn > mvNewScanColumns = new HashSet < > ( ) ; Collection < SchemaColumn > columns = mvTableScan . getScanColumns ( ) ; if ( columns != null ) { mvNewScanColumns . addAll ( columns ) ; } if ( m_mvFixInfo . processMVBasedQueryFix ( mvTableScan , mvNewScanColumns , m_joinTree , m_aggResultColumns , groupByColumns ( ) ) ) { break ; } } }
Prepare for the mv based distributed query fix only if it might be required .
31,865
private void placeTVEsinColumns ( ) { Map < AbstractExpression , Integer > aggTableIndexMap = new HashMap < > ( ) ; Map < Integer , ParsedColInfo > indexToColumnMap = new HashMap < > ( ) ; int index = 0 ; for ( ParsedColInfo col : m_aggResultColumns ) { aggTableIndexMap . put ( col . m_expression , index ) ; if ( col . m_alias == null ) { col . m_alias = "$$_" + col . m_expression . getExpressionType ( ) . symbol ( ) + "_$$_" + index ; } indexToColumnMap . put ( index , col ) ; index ++ ; } m_groupByExpressions = new HashMap < > ( ) ; for ( ParsedColInfo groupbyCol : m_groupByColumns ) { AbstractExpression expr = groupbyCol . m_expression ; assert ( aggTableIndexMap . get ( expr ) != null ) ; expr = expr . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; m_groupByExpressions . put ( groupbyCol . m_alias , expr ) ; } if ( m_having != null ) { m_having = m_having . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; ExpressionUtil . finalizeValueTypes ( m_having ) ; } m_projectSchema = new NodeSchema ( ) ; for ( ParsedColInfo col : m_displayColumns ) { AbstractExpression expr = col . m_expression ; if ( hasComplexAgg ( ) ) { expr = expr . replaceWithTVE ( aggTableIndexMap , indexToColumnMap ) ; } m_projectSchema . addColumn ( col . m_tableName , col . m_tableAlias , col . m_columnName , col . m_alias , expr , col . m_differentiator ) ; } placeTVEsForOrderby ( aggTableIndexMap , indexToColumnMap ) ; }
Generate new output Schema and Place TVEs for display columns if needed . Place TVEs for order by columns always .
31,866
private void insertAggExpressionsToAggResultColumns ( List < AbstractExpression > aggColumns , ParsedColInfo cookedCol ) { for ( AbstractExpression expr : aggColumns ) { assert ( expr instanceof AggregateExpression ) ; if ( expr . hasSubquerySubexpression ( ) ) { throw new PlanningErrorException ( "SQL Aggregate function calls with subquery expression arguments are not allowed." ) ; } ParsedColInfo col = new ParsedColInfo ( ) ; col . m_expression = expr . clone ( ) ; assert ( col . m_expression instanceof AggregateExpression ) ; if ( col . m_expression . getExpressionType ( ) == ExpressionType . AGGREGATE_AVG ) { m_hasAverage = true ; } if ( aggColumns . size ( ) == 1 && cookedCol . m_expression . equals ( aggColumns . get ( 0 ) ) ) { col . m_alias = cookedCol . m_alias ; col . m_tableName = cookedCol . m_tableName ; col . m_tableAlias = cookedCol . m_tableAlias ; col . m_columnName = cookedCol . m_columnName ; if ( ! m_aggResultColumns . contains ( col ) ) { m_aggResultColumns . add ( col ) ; } return ; } m_hasComplexAgg = true ; col . m_tableName = TEMP_TABLE_NAME ; col . m_tableAlias = TEMP_TABLE_NAME ; col . m_columnName = "" ; if ( ! m_aggResultColumns . contains ( col ) ) { m_aggResultColumns . add ( col ) ; } ExpressionUtil . finalizeValueTypes ( col . m_expression ) ; } }
ParseDisplayColumns and ParseOrderColumns will call this function to add Aggregation expressions to aggResultColumns
31,867
private static void insertToColumnList ( List < ParsedColInfo > columnList , List < ParsedColInfo > newCols ) { for ( ParsedColInfo col : newCols ) { if ( ! columnList . contains ( col ) ) { columnList . add ( col ) ; } } }
Concat elements to the XXXColumns list
31,868
private void findAllTVEs ( AbstractExpression expr , List < TupleValueExpression > tveList ) { if ( ! isNewtoColumnList ( m_aggResultColumns , expr ) ) { return ; } if ( expr instanceof TupleValueExpression ) { tveList . add ( ( TupleValueExpression ) expr . clone ( ) ) ; return ; } if ( expr . getLeft ( ) != null ) { findAllTVEs ( expr . getLeft ( ) , tveList ) ; } if ( expr . getRight ( ) != null ) { findAllTVEs ( expr . getRight ( ) , tveList ) ; } if ( expr . getArgs ( ) != null ) { for ( AbstractExpression ae : expr . getArgs ( ) ) { findAllTVEs ( ae , tveList ) ; } } }
Find all TVEs except inside of AggregationExpression
31,869
private void verifyWindowFunctionExpressions ( ) { if ( m_windowFunctionExpressions . size ( ) > 0 ) { if ( m_windowFunctionExpressions . size ( ) > 1 ) { throw new PlanningErrorException ( "Only one windowed function call may appear in a selection list." ) ; } if ( m_hasAggregateExpression ) { throw new PlanningErrorException ( "Use of window functions (in an OVER clause) isn't supported with other aggregate functions on the SELECT list." ) ; } if ( m_windowFunctionExpressions . get ( 0 ) . hasSubqueryArgs ( ) ) { throw new PlanningErrorException ( "Window function calls with subquery expression arguments are not allowed." ) ; } WindowFunctionExpression windowFunctionExpression = m_windowFunctionExpressions . get ( 0 ) ; List < AbstractExpression > orderByExpressions = windowFunctionExpression . getOrderByExpressions ( ) ; ExpressionType exprType = windowFunctionExpression . getExpressionType ( ) ; String aggName = exprType . symbol ( ) . toUpperCase ( ) ; switch ( exprType ) { case AGGREGATE_WINDOWED_RANK : case AGGREGATE_WINDOWED_DENSE_RANK : if ( orderByExpressions . size ( ) == 0 ) { throw new PlanningErrorException ( "Windowed " + aggName + " function call expressions require an ORDER BY specification." ) ; } VoltType valType = orderByExpressions . get ( 0 ) . getValueType ( ) ; assert ( valType != null ) ; if ( ! valType . isAnyIntegerType ( ) && ( valType != VoltType . TIMESTAMP ) ) { throw new PlanningErrorException ( "Windowed function call expressions can have only integer or TIMESTAMP value types in the ORDER BY expression of their window." ) ; } break ; case AGGREGATE_WINDOWED_COUNT : if ( windowFunctionExpression . getAggregateArguments ( ) . size ( ) > 1 ) { throw new PlanningErrorException ( String . format ( "Windowed COUNT must have either exactly one argument or else a star for an argument" ) ) ; } break ; case AGGREGATE_WINDOWED_MAX : case AGGREGATE_WINDOWED_MIN : if ( windowFunctionExpression . getAggregateArguments ( ) . 
size ( ) != 1 ) { throw new PlanningErrorException ( String . format ( "Windowed %s must have exactly one argument" , aggName ) ) ; } break ; case AGGREGATE_WINDOWED_SUM : if ( windowFunctionExpression . getAggregateArguments ( ) . size ( ) != 1 ) { throw new PlanningErrorException ( String . format ( "Windowed SUM must have exactly one numeric argument" ) ) ; } AbstractExpression arg = windowFunctionExpression . getAggregateArguments ( ) . get ( 0 ) ; VoltType vt = arg . getValueType ( ) ; assert ( vt != null ) ; if ( ! vt . isNumber ( ) ) { throw new PlanningErrorException ( "Windowed SUM must have exactly one numeric argument" ) ; } break ; case AGGREGATE_WINDOWED_ROW_NUMBER : break ; default : { String opName = ( exprType == null ) ? "NULL" : exprType . symbol ( ) ; throw new PlanningErrorException ( "Unknown windowed aggregate function type: " + opName ) ; } } } }
Verify the validity of the windowed expressions .
31,870
private boolean canPushdownLimit ( ) { boolean limitCanPushdown = ( m_limitOffset . hasLimit ( ) && ! m_distinct ) ; if ( limitCanPushdown ) { for ( ParsedColInfo col : m_displayColumns ) { AbstractExpression rootExpr = col . m_expression ; if ( rootExpr instanceof AggregateExpression ) { if ( ( ( AggregateExpression ) rootExpr ) . isDistinct ( ) ) { limitCanPushdown = false ; break ; } } } } return limitCanPushdown ; }
Check if the LimitPlanNode can be pushed down . The LimitPlanNode may have a LIMIT clause only OFFSET clause only or both . Offset only cannot be pushed down .
31,871
private boolean isValidJoinOrder ( List < String > tableAliases ) { assert ( m_joinTree != null ) ; List < JoinNode > subTrees = m_joinTree . extractSubTrees ( ) ; int tableNameIdx = 0 ; List < JoinNode > finalSubTrees = new ArrayList < > ( ) ; for ( int i = subTrees . size ( ) - 1 ; i >= 0 ; -- i ) { JoinNode subTree = subTrees . get ( i ) ; List < JoinNode > subTableNodes = subTree . generateLeafNodesJoinOrder ( ) ; JoinNode joinOrderSubTree ; if ( ( subTree instanceof BranchNode ) && ( ( BranchNode ) subTree ) . getJoinType ( ) != JoinType . INNER ) { joinOrderSubTree = subTree ; for ( JoinNode tableNode : subTableNodes ) { if ( tableNode . getId ( ) >= 0 ) { String tableAlias = tableNode . getTableAlias ( ) ; if ( ! tableAliases . get ( tableNameIdx ++ ) . equals ( tableAlias ) ) { return false ; } } } } else { Map < String , JoinNode > nodeNameMap = new HashMap < > ( ) ; for ( JoinNode tableNode : subTableNodes ) { if ( tableNode . getId ( ) >= 0 ) { nodeNameMap . put ( tableNode . getTableAlias ( ) , tableNode ) ; } } List < JoinNode > joinOrderSubNodes = new ArrayList < > ( ) ; for ( int j = 0 ; j < subTableNodes . size ( ) ; ++ j ) { if ( subTableNodes . get ( j ) . getId ( ) >= 0 ) { assert ( tableNameIdx < tableAliases . size ( ) ) ; String tableAlias = tableAliases . get ( tableNameIdx ) ; if ( tableAlias == null || ! nodeNameMap . containsKey ( tableAlias ) ) { return false ; } joinOrderSubNodes . add ( nodeNameMap . get ( tableAlias ) ) ; ++ tableNameIdx ; } else { joinOrderSubNodes . add ( subTableNodes . get ( j ) ) ; } } joinOrderSubTree = JoinNode . reconstructJoinTreeFromTableNodes ( joinOrderSubNodes , JoinType . INNER ) ; AbstractExpression combinedWhereExpr = subTree . getAllFilters ( ) ; if ( combinedWhereExpr != null ) { joinOrderSubTree . setWhereExpression ( combinedWhereExpr . clone ( ) ) ; } joinOrderSubTree . setId ( subTree . getId ( ) ) ; } finalSubTrees . add ( 0 , joinOrderSubTree ) ; } JoinNode newNode = JoinNode . 
reconstructJoinTreeFromSubTrees ( finalSubTrees ) ; m_joinOrderList . add ( newNode ) ; return true ; }
Validate the specified join order against the join tree . In general outer joins are not associative and commutative . Not all orders are valid . In case of a valid join order the initial join tree is rebuilt to match the specified order
31,872
public boolean isOrderDeterministic ( ) { if ( ! hasTopLevelScans ( ) ) { return true ; } if ( hasAOneRowResult ( ) ) { return true ; } if ( ! hasOrderByColumns ( ) ) { return false ; } ArrayList < AbstractExpression > nonOrdered = new ArrayList < > ( ) ; if ( isGrouped ( ) ) { if ( orderByColumnsDetermineAllColumns ( m_groupByColumns , nonOrdered ) ) { return true ; } if ( orderByColumnsDetermineAllDisplayColumns ( nonOrdered ) ) { return true ; } } else { if ( orderByColumnsDetermineAllDisplayColumns ( nonOrdered ) ) { return true ; } if ( orderByColumnsCoverUniqueKeys ( ) ) { return true ; } } return false ; }
Returns true if this select statement can be proved to always produce its result rows in the same order every time that it is executed .
31,873
public boolean orderByColumnsDetermineAllDisplayColumnsForUnion ( List < ParsedColInfo > orderColumns ) { Set < AbstractExpression > orderExprs = new HashSet < > ( ) ; for ( ParsedColInfo col : orderColumns ) { orderExprs . add ( col . m_expression ) ; } for ( ParsedColInfo col : m_displayColumns ) { if ( ! orderExprs . contains ( col . m_expression ) ) { return false ; } } return true ; }
This is a very simple version of the above method for when an ORDER BY clause appears on a UNION . Does the ORDER BY clause reference every item on the display list? If so then the order is deterministic .
31,874
public boolean isPartitionColumnInWindowedAggregatePartitionByList ( ) { if ( getWindowFunctionExpressions ( ) . size ( ) == 0 ) { return false ; } assert ( getWindowFunctionExpressions ( ) . size ( ) == 1 ) ; WindowFunctionExpression we = getWindowFunctionExpressions ( ) . get ( 0 ) ; List < AbstractExpression > partitionByExprs = we . getPartitionByExpressions ( ) ; boolean foundPartExpr = false ; for ( AbstractExpression ae : partitionByExprs ) { if ( ! ( ae instanceof TupleValueExpression ) ) { continue ; } TupleValueExpression tve = ( TupleValueExpression ) ae ; String tableAlias = tve . getTableAlias ( ) ; String columnName = tve . getColumnName ( ) ; StmtTableScan scanTable = getStmtTableScanByAlias ( tableAlias ) ; if ( scanTable == null || scanTable . getPartitioningColumns ( ) == null ) { continue ; } boolean foundPartCol = false ; for ( SchemaColumn pcol : scanTable . getPartitioningColumns ( ) ) { if ( pcol != null && pcol . getColumnName ( ) . equals ( columnName ) ) { foundPartCol = true ; break ; } } if ( foundPartCol ) { foundPartExpr = true ; break ; } } return foundPartExpr ; }
Return true iff all the windowed partition expressions have a table partition column in their partition by list and if there is one such windowed partition expression . If there are no windowed expressions we return false . Note that there can only be one windowed expression currently so this is more general than it needs to be .
31,875
public static GeographyValue CreateRegularConvex ( GeographyPointValue center , GeographyPointValue firstVertex , int numVertices , double sizeOfHole ) { assert ( 0 <= sizeOfHole && sizeOfHole < 1.0 ) ; double phi = 360.0 / numVertices ; GeographyPointValue holeFirstVertex = null ; if ( sizeOfHole > 0 ) { holeFirstVertex = firstVertex . scale ( center , sizeOfHole ) ; } List < GeographyPointValue > oneLoop = new ArrayList < > ( ) ; List < GeographyPointValue > hole = ( sizeOfHole < 0 ? null : new ArrayList < > ( ) ) ; for ( int idx = 0 ; idx < numVertices ; idx += 1 ) { int holeIdx = numVertices - idx ; oneLoop . add ( firstVertex . rotate ( idx * phi , center ) ) ; if ( sizeOfHole > 0 ) { hole . add ( holeFirstVertex . rotate ( - ( holeIdx * phi ) , center ) ) ; } } oneLoop . add ( firstVertex ) ; if ( sizeOfHole > 0 ) { hole . add ( holeFirstVertex ) ; } List < List < GeographyPointValue > > loops = new ArrayList < > ( ) ; loops . add ( oneLoop ) ; if ( sizeOfHole > 0 ) { loops . add ( hole ) ; } return new GeographyValue ( loops ) ; }
Create a regular convex polygon with an optional hole .
31,876
public static GeographyValue reverseLoops ( GeographyValue goodPolygon ) { List < List < GeographyPointValue > > newLoops = new ArrayList < > ( ) ; List < List < GeographyPointValue > > oldLoops = goodPolygon . getRings ( ) ; for ( List < GeographyPointValue > loop : oldLoops ) { List < GeographyPointValue > newLoop = new ArrayList < > ( ) ; newLoop . add ( loop . get ( 0 ) ) ; for ( int idx = loop . size ( ) - 2 ; idx > 1 ; idx -= 1 ) { newLoop . add ( loop . get ( idx ) ) ; } newLoops . add ( newLoop ) ; } return new GeographyValue ( newLoops ) ; }
Reverse all the loops in a polygon . Don t change the order of the loops just reverse each loop .
31,877
public void grant ( String granteeName , String roleName , Grantee grantor ) { Grantee grantee = get ( granteeName ) ; if ( grantee == null ) { throw Error . error ( ErrorCode . X_28501 , granteeName ) ; } if ( isImmutable ( granteeName ) ) { throw Error . error ( ErrorCode . X_28502 , granteeName ) ; } Grantee role = getRole ( roleName ) ; if ( role == null ) { throw Error . error ( ErrorCode . X_0P000 , roleName ) ; } if ( role == grantee ) { throw Error . error ( ErrorCode . X_0P501 , granteeName ) ; } if ( role . hasRole ( grantee ) ) { throw Error . error ( ErrorCode . X_0P501 , roleName ) ; } if ( ! grantor . isGrantable ( role ) ) { throw Error . error ( ErrorCode . X_0L000 , grantor . getNameString ( ) ) ; } grantee . grant ( role ) ; grantee . updateAllRights ( ) ; if ( grantee . isRole ) { updateAllRights ( grantee ) ; } }
Grant a role to this Grantee .
31,878
public void revoke ( String granteeName , String roleName , Grantee grantor ) { if ( ! grantor . isAdmin ( ) ) { throw Error . error ( ErrorCode . X_42507 ) ; } Grantee grantee = get ( granteeName ) ; if ( grantee == null ) { throw Error . error ( ErrorCode . X_28000 , granteeName ) ; } Grantee role = ( Grantee ) roleMap . get ( roleName ) ; grantee . revoke ( role ) ; grantee . updateAllRights ( ) ; if ( grantee . isRole ) { updateAllRights ( grantee ) ; } }
Revoke a role from a Grantee
31,879
void removeEmptyRole ( Grantee role ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; grantee . roles . remove ( role ) ; } }
Removes a role without any privileges from all grantees
31,880
public void removeDbObject ( HsqlName name ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee g = ( Grantee ) map . get ( i ) ; g . revokeDbObject ( name ) ; } }
Removes all rights mappings for the database object identified by the dbobject argument from all Grantee objects in the set .
31,881
void updateAllRights ( Grantee role ) { for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; if ( grantee . isRole ) { grantee . updateNestedRoles ( role ) ; } } for ( int i = 0 ; i < map . size ( ) ; i ++ ) { Grantee grantee = ( Grantee ) map . get ( i ) ; if ( ! grantee . isRole ) { grantee . updateAllRights ( ) ; } } }
First updates all ROLE Grantee objects . Then updates all USER Grantee Objects .
31,882
public Grantee getRole ( String name ) { Grantee g = ( Grantee ) roleMap . get ( name ) ; if ( g == null ) { throw Error . error ( ErrorCode . X_0P000 , name ) ; } return g ; }
Returns Grantee for the named Role
31,883
private void connect ( Session session , boolean withReadOnlyData ) { if ( ( dataSource . length ( ) == 0 ) || isConnected ) { return ; } PersistentStore store = database . persistentStoreCollection . getStore ( this ) ; this . store = store ; DataFileCache cache = null ; try { cache = ( TextCache ) database . logger . openTextCache ( this , dataSource , withReadOnlyData , isReversed ) ; store . setCache ( cache ) ; Row row = null ; int nextpos = 0 ; if ( ( ( TextCache ) cache ) . ignoreFirst ) { nextpos += ( ( TextCache ) cache ) . readHeaderLine ( ) ; } while ( true ) { row = ( Row ) store . get ( nextpos , false ) ; if ( row == null ) { break ; } Object [ ] data = row . getData ( ) ; nextpos = row . getPos ( ) + row . getStorageSize ( ) ; ( ( RowAVLDiskData ) row ) . setNewNodes ( ) ; systemUpdateIdentityValue ( data ) ; enforceRowConstraints ( session , data ) ; for ( int i = 0 ; i < indexList . length ; i ++ ) { indexList [ i ] . insert ( null , store , row ) ; } } } catch ( Exception e ) { int linenumber = cache == null ? 0 : ( ( TextCache ) cache ) . getLineNumber ( ) ; clearAllData ( session ) ; if ( cache != null ) { database . logger . closeTextCache ( this ) ; store . release ( ) ; } throw Error . error ( ErrorCode . TEXT_FILE , 0 , new Object [ ] { new Integer ( linenumber ) , e . getMessage ( ) } ) ; } isConnected = true ; isReadOnly = withReadOnlyData ; }
connects to the data source
31,884
public void disconnect ( ) { this . store = null ; PersistentStore store = database . persistentStoreCollection . getStore ( this ) ; store . release ( ) ; isConnected = false ; }
disconnects from the data source
31,885
private void openCache ( Session session , String dataSourceNew , boolean isReversedNew , boolean isReadOnlyNew ) { String dataSourceOld = dataSource ; boolean isReversedOld = isReversed ; boolean isReadOnlyOld = isReadOnly ; if ( dataSourceNew == null ) { dataSourceNew = "" ; } disconnect ( ) ; dataSource = dataSourceNew ; isReversed = ( isReversedNew && dataSource . length ( ) > 0 ) ; try { connect ( session , isReadOnlyNew ) ; } catch ( HsqlException e ) { dataSource = dataSourceOld ; isReversed = isReversedOld ; connect ( session , isReadOnlyOld ) ; throw e ; } }
This method does some of the work involved with managing the creation and opening of the cache the rest is done in Log . java and TextCache . java .
31,886
protected void setDataSource ( Session session , String dataSourceNew , boolean isReversedNew , boolean createFile ) { if ( getTableType ( ) == Table . TEMP_TEXT_TABLE ) { ; } else { session . getGrantee ( ) . checkSchemaUpdateOrGrantRights ( getSchemaName ( ) . name ) ; } dataSourceNew = dataSourceNew . trim ( ) ; if ( createFile && FileUtil . getDefaultInstance ( ) . exists ( dataSourceNew ) ) { throw Error . error ( ErrorCode . TEXT_SOURCE_EXISTS , dataSourceNew ) ; } if ( isReversedNew || ( isReversedNew != isReversed ) || ! dataSource . equals ( dataSourceNew ) || ! isConnected ) { openCache ( session , dataSourceNew , isReversedNew , isReadOnly ) ; } if ( isReversed ) { isReadOnly = true ; } }
High level command to assign a data source to the table definition . Reassigns only if the data source or direction has changed .
31,887
void checkDataReadOnly ( ) { if ( dataSource . length ( ) == 0 ) { throw Error . error ( ErrorCode . TEXT_TABLE_UNKNOWN_DATA_SOURCE ) ; } if ( isReadOnly ) { throw Error . error ( ErrorCode . DATA_IS_READONLY ) ; } }
Used by INSERT DELETE UPDATE operations . This class will return a more appropriate message when there is no data source .
31,888
public void addBatch ( ) throws SQLException { checkClosed ( ) ; if ( this . Query . isOfType ( VoltSQL . TYPE_EXEC , VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , this . Query . toSqlString ( ) ) ; } this . addBatch ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; }
Adds a set of parameters to this PreparedStatement object s batch of commands .
31,889
public boolean execute ( ) throws SQLException { checkClosed ( ) ; boolean result = this . execute ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; return result ; }
Executes the SQL statement in this PreparedStatement object which may be any kind of SQL statement .
31,890
public ResultSet executeQuery ( ) throws SQLException { checkClosed ( ) ; if ( ! this . Query . isOfType ( VoltSQL . TYPE_EXEC , VoltSQL . TYPE_SELECT ) ) { throw SQLError . get ( SQLError . ILLEGAL_STATEMENT , this . Query . toSqlString ( ) ) ; } ResultSet result = this . executeQuery ( this . Query . getExecutableQuery ( this . parameters ) ) ; this . parameters = this . Query . getParameterArray ( ) ; return result ; }
Executes the SQL query in this PreparedStatement object and returns the ResultSet object generated by the query .
31,891
public void setArray ( int parameterIndex , Array x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
Sets the designated parameter to the given java . sql . Array object .
31,892
public void setByte ( int parameterIndex , byte x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
Sets the designated parameter to the given Java byte value .
31,893
public void setBytes ( int parameterIndex , byte [ ] x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
Sets the designated parameter to the given Java array of bytes .
31,894
public void setCharacterStream ( int parameterIndex , Reader reader ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
Sets the designated parameter to the given Reader object .
31,895
public void setDouble ( int parameterIndex , double x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
Sets the designated parameter to the given Java double value .
31,896
public void setFloat ( int parameterIndex , float x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = ( double ) x ; }
Sets the designated parameter to the given Java float value .
31,897
public void setInt ( int parameterIndex , int x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
Sets the designated parameter to the given Java int value .
31,898
public void setLong ( int parameterIndex , long x ) throws SQLException { checkParameterBounds ( parameterIndex ) ; this . parameters [ parameterIndex - 1 ] = x ; }
Sets the designated parameter to the given Java long value .
31,899
public void setNString ( int parameterIndex , String value ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
Sets the designated parameter to the given String object .