idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
32,300
/**
 * Binary search over the sorted slots.
 *
 * @return the index of the lowest element >= the search target, or
 *         {@code count} when every element is smaller.
 */
private int binarySlotSearch ( ) {
    int low = 0;
    int high = count;

    while (low < high) {
        // Unsigned shift avoids the classic (low + high) / 2 integer-overflow
        // bug when low + high exceeds Integer.MAX_VALUE.
        final int mid = (low + high) >>> 1;
        final int compare = compare(mid);

        if (compare <= 0) {
            high = mid;
        } else {
            low = mid + 1;
        }
    }

    return low;
}
Returns the index of the lowest element >= the given search target, or count if every element is smaller
32,301
/**
 * Binary search that also detects an exact match.
 *
 * @return the index of the lowest element > the search target, or
 *         {@code count} when none exists, or -1 if the target is found.
 */
private int binaryEmptySlotSearch ( ) {
    int low = 0;
    int high = count;

    while (low < high) {
        // Overflow-safe midpoint (vs. the original (low + high) / 2).
        final int mid = (low + high) >>> 1;
        final int compare = compare(mid);

        if (compare < 0) {
            high = mid;
        } else if (compare > 0) {
            low = mid + 1;
        } else {
            return -1;
        }
    }

    return low;
}
Returns the index of the lowest element > the given search target, or count if none exists, or -1 if the target is found
32,302
/**
 * Three-way comparison of the search target against row {@code i}, using
 * either the values or the keys column depending on the sort mode.
 *
 * @param i row index to compare against
 * @return 1, -1 or 0 as the target is greater than, less than or equal to
 *         the targeted column value of row {@code i}
 */
private int compare ( int i ) {
    if (sortOnValues) {
        if (targetSearchValue > values[i]) {
            return 1;
        }
        if (targetSearchValue < values[i]) {
            return -1;
        }
        return 0;
    }
    if (targetSearchValue > keys[i]) {
        return 1;
    }
    if (targetSearchValue < keys[i]) {
        return -1;
    }
    return 0;
}
Check if targeted column value in the row indexed i is less than the search target object .
32,303
/**
 * Reports whether row {@code i} orders strictly before row {@code j} on
 * the active sort column (values when sortOnValues, otherwise keys).
 */
private boolean lessThan ( int i , int j ) {
    if (sortOnValues) {
        return values[i] < values[j];
    }
    return keys[i] < keys[j];
}
Check if row indexed i is less than row indexed j
32,304
/**
 * Applies the given font size to the owner's tree, command and result widgets.
 *
 * @param inFontSize the new point size, parsed as a float
 * @throws NumberFormatException if {@code inFontSize} is not a parsable float
 */
public static void setFontSize ( String inFontSize ) {
    // Float.parseFloat avoids the deprecated Float(String) boxing constructor.
    final float fontSize = Float.parseFloat(inFontSize);

    applyFontSize(fOwner.tTree, fontSize);
    applyFontSize(fOwner.txtCommand, fontSize);
    applyFontSize(fOwner.txtResult, fontSize);
}

/** Derives the component's current font at the given size and installs it. */
private static void applyFontSize(java.awt.Component component, float fontSize) {
    component.setFont(component.getFont().deriveFont(fontSize));
}
Displays a color chooser and Sets the selected color .
32,305
// Resolves the effective Host configuration for hostName: starts from the
// exact-name cache entry (or a fresh Host when absent), then overlays every
// cache entry whose key is a host pattern matching hostName. patternsApplied
// memoizes the overlay so repeated lookups return early; a port of 0 is
// normalized to -1 (meaning "unset") before the result is returned.
public Host lookup ( final String hostName ) { final Map < String , Host > cache = this . refresh ( ) ; Host h = cache . get ( hostName ) ; if ( h == null ) { h = new Host ( ) ; } if ( h . patternsApplied ) { return h ; } for ( final Map . Entry < String , Host > e : cache . entrySet ( ) ) { if ( ! isHostPattern ( e . getKey ( ) ) ) { continue ; } if ( ! isHostMatch ( e . getKey ( ) , hostName ) ) { continue ; } h . copyFrom ( e . getValue ( ) ) ; } if ( h . port == 0 ) { h . port = - 1 ; } h . patternsApplied = true ; return h ; }
Locate the configuration for a specific host request .
32,306
/**
 * Single-thread executor whose worker is reclaimed after {@code keepAlive}
 * milliseconds of idleness; the next submitted task lazily creates a new one.
 *
 * @param name      thread name passed to the thread factory
 * @param keepAlive idle time in milliseconds before the worker terminates
 */
public static ListeningExecutorService getCachedSingleThreadExecutor ( String name , long keepAlive ) {
    // Core size 0 with a keep-alive lets the single worker expire when idle.
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(
            0, 1,
            keepAlive, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            CoreUtils.getThreadFactory(null, name, SMALL_STACK_SIZE, false, null));
    return MoreExecutors.listeningDecorator(executor);
}
Get a single thread executor that caches its thread meaning that the thread will terminate after keepAlive milliseconds . A new thread will be created the next time a task arrives and that will be kept around for keepAlive milliseconds . On creation no thread is allocated the first task creates a thread .
32,307
/**
 * Single-threaded executor holding at most {@code capacity} queued tasks;
 * submissions beyond that are rejected by the bounded queue.
 *
 * @param name     thread name passed to the thread factory
 * @param capacity maximum number of outstanding queued tasks
 */
public static ListeningExecutorService getBoundedSingleThreadExecutor ( String name , int capacity ) {
    final LinkedBlockingQueue<Runnable> boundedQueue =
            new LinkedBlockingQueue<Runnable>(capacity);
    final ThreadPoolExecutor executor = new ThreadPoolExecutor(
            1, 1, 0L, TimeUnit.MILLISECONDS, boundedQueue,
            CoreUtils.getThreadFactory(name));
    return MoreExecutors.listeningDecorator(executor);
}
Create a bounded single threaded executor that rejects requests if more than capacity requests are outstanding .
32,308
/**
 * Bounded pool with zero core threads and direct handoff: a task is accepted
 * only when a worker (existing or newly created, up to {@code maxPoolSize})
 * can take it immediately, otherwise RejectedExecutionException is thrown.
 *
 * @param maxPoolSize   maximum number of worker threads
 * @param keepAliveTime idle time before a worker terminates
 * @param unit          unit of {@code keepAliveTime}
 * @param tFactory      factory used to create worker threads
 */
public static ThreadPoolExecutor getBoundedThreadPoolExecutor ( int maxPoolSize , long keepAliveTime , TimeUnit unit , ThreadFactory tFactory ) {
    final SynchronousQueue<Runnable> handoff = new SynchronousQueue<Runnable>();
    return new ThreadPoolExecutor(0, maxPoolSize, keepAliveTime, unit, handoff, tFactory);
}
Create a bounded thread pool executor . The work queue is synchronous and can cause RejectedExecutionException if there is no available thread to take a new task .
32,309
// Adapter from ExecutorService to an existing Runnable queue: execute/submit
// wrap tasks as FutureTask/ListenableFutureTask and offer them to taskQueue
// for some external consumer to run. Lifecycle methods are deliberately
// unsupported (shutdown*, invokeAll/invokeAny throw; isShutdown/isTerminated
// report false; awaitTermination trivially returns true).
// NOTE(review): taskQueue.offer return values are ignored, so tasks are
// silently dropped if the queue is bounded and full -- confirm callers only
// pass unbounded queues.
public static ExecutorService getQueueingExecutorService ( final Queue < Runnable > taskQueue ) { return new ExecutorService ( ) { public void execute ( Runnable command ) { taskQueue . offer ( command ) ; } public void shutdown ( ) { throw new UnsupportedOperationException ( ) ; } public List < Runnable > shutdownNow ( ) { throw new UnsupportedOperationException ( ) ; } public boolean isShutdown ( ) { return false ; } public boolean isTerminated ( ) { return false ; } public boolean awaitTermination ( long timeout , TimeUnit unit ) throws InterruptedException { return true ; } public < T > Future < T > submit ( Callable < T > task ) { Preconditions . checkNotNull ( task ) ; FutureTask < T > retval = new FutureTask < T > ( task ) ; taskQueue . offer ( retval ) ; return retval ; } public < T > Future < T > submit ( Runnable task , T result ) { Preconditions . checkNotNull ( task ) ; FutureTask < T > retval = new FutureTask < T > ( task , result ) ; taskQueue . offer ( retval ) ; return retval ; } public Future < ? > submit ( Runnable task ) { Preconditions . checkNotNull ( task ) ; ListenableFutureTask < Object > retval = ListenableFutureTask . create ( task , null ) ; taskQueue . offer ( retval ) ; return retval ; } public < T > List < Future < T > > invokeAll ( Collection < ? extends Callable < T > > tasks ) throws InterruptedException { throw new UnsupportedOperationException ( ) ; } public < T > List < Future < T > > invokeAll ( Collection < ? extends Callable < T > > tasks , long timeout , TimeUnit unit ) throws InterruptedException { throw new UnsupportedOperationException ( ) ; } public < T > T invokeAny ( Collection < ? extends Callable < T > > tasks ) throws InterruptedException , ExecutionException { throw new UnsupportedOperationException ( ) ; } public < T > T invokeAny ( Collection < ? 
extends Callable < T > > tasks , long timeout , TimeUnit unit ) throws InterruptedException , ExecutionException , TimeoutException { throw new UnsupportedOperationException ( ) ; } } ; }
Create an ExecutorService that places tasks in an existing task queue for execution . Used to create a bridge for using ListenableFutures in classes already built around a queue .
32,310
// Builds a factory for daemon threads, optionally placed in a new ThreadGroup
// named groupName. Each created thread wraps the task so any Throwable is
// logged to the HOST logger and m_threadLocalDeallocator runs on exit; names
// get a " - N" suffix when incrementThreadNames is set. One entry is polled
// from coreList per thread, but the resulting core value is never used in
// run() -- NOTE(review): the affinity block is empty; confirm whether core
// pinning was intentionally removed.
public static ThreadFactory getThreadFactory ( final String groupName , final String name , final int stackSize , final boolean incrementThreadNames , final Queue < String > coreList ) { ThreadGroup group = null ; if ( groupName != null ) { group = new ThreadGroup ( Thread . currentThread ( ) . getThreadGroup ( ) , groupName ) ; } final ThreadGroup finalGroup = group ; return new ThreadFactory ( ) { private final AtomicLong m_createdThreadCount = new AtomicLong ( 0 ) ; private final ThreadGroup m_group = finalGroup ; public synchronized Thread newThread ( final Runnable r ) { final String threadName = name + ( incrementThreadNames ? " - " + m_createdThreadCount . getAndIncrement ( ) : "" ) ; String coreTemp = null ; if ( coreList != null && ! coreList . isEmpty ( ) ) { coreTemp = coreList . poll ( ) ; } final String core = coreTemp ; Runnable runnable = new Runnable ( ) { public void run ( ) { if ( core != null ) { } try { r . run ( ) ; } catch ( Throwable t ) { new VoltLogger ( "HOST" ) . error ( "Exception thrown in thread " + threadName , t ) ; } finally { m_threadLocalDeallocator . run ( ) ; } } } ; Thread t = new Thread ( m_group , runnable , threadName , stackSize ) ; t . setDaemon ( true ) ; return t ; } } ; }
Creates a thread factory that creates threads within a thread group if the group name is given . The threads created will catch any unhandled exceptions and log them to the HOST logger .
32,311
/**
 * Resolves the local address to a hostname when possible.
 *
 * @return the cached hostname (or textual address) for the local address,
 *         or "" when no local address is available
 */
public static String getHostnameOrAddress ( ) {
    final InetAddress local = m_localAddressSupplier.get();
    if (local == null) {
        return "";
    }
    return ReverseDNSCache.hostnameOrAddress(local);
}
Return the local hostname if it is resolvable . If not , return the IPv4 address on the first interface we find , if it exists . Otherwise return whatever address exists on the first interface .
32,312
/**
 * Schedules {@code callable} for asynchronous retry via the nine-argument
 * overload, exposing the result as a future the caller may try to cancel.
 *
 * @return a settable future completed by the retry machinery
 */
public static final < T > ListenableFuture < T > retryHelper ( final ScheduledExecutorService ses , final ExecutorService es , final Callable < T > callable , final long maxAttempts , final long startInterval , final TimeUnit startUnit , final long maxInterval , final TimeUnit maxUnit ) {
    final SettableFuture<T> result = SettableFuture.create();
    retryHelper(ses, es, callable, maxAttempts, startInterval, startUnit,
            maxInterval, maxUnit, result);
    return result;
}
A helper for retrying tasks asynchronously returns a settable future that can be used to attempt to cancel the task .
32,313
/**
 * Sorts a map's entries ascending by value, breaking value ties by key.
 *
 * @param map the map whose entries are to be ordered
 * @return a new list of the map's entries in sorted order
 */
public static < K extends Comparable < ? super K > , V extends Comparable < ? super V > > List < Entry < K , V > > sortKeyValuePairByValue ( Map < K , V > map ) {
    final List<Map.Entry<K, V>> sorted = new ArrayList<Map.Entry<K, V>>(map.entrySet());
    final Comparator<Map.Entry<K, V>> byValueThenKey = new Comparator<Map.Entry<K, V>>() {
        public int compare(Entry<K, V> lhs, Entry<K, V> rhs) {
            // Values first; only equal values fall back to the key ordering.
            if (!lhs.getValue().equals(rhs.getValue())) {
                return lhs.getValue().compareTo(rhs.getValue());
            }
            return lhs.getKey().compareTo(rhs.getKey());
        }
    };
    Collections.sort(sorted, byValueThenKey);
    return sorted;
}
Utility method to sort the keys and values of a map by their value .
32,314
/**
 * Attaches {@code n} as the left or right child of {@code x} and fixes the
 * child's parent pointer.
 *
 * @return the (possibly replaced) parent node
 */
private static NodeAVL set ( PersistentStore store , NodeAVL x , boolean isleft , NodeAVL n ) {
    final NodeAVL parent = isleft ? x.setLeft(store, n) : x.setRight(store, n);
    if (n != null) {
        n.setParent(store, parent);
    }
    return parent;
}
Set a node as child of another
32,315
/**
 * Returns the selected child of {@code x}: left when {@code isleft}, right
 * otherwise.
 */
private static NodeAVL child ( PersistentStore store , NodeAVL x , boolean isleft ) {
    if (isleft) {
        return x.getLeft(store);
    }
    return x.getRight(store);
}
Returns either child node
32,316
/**
 * Compares two full table rows over the given column set.
 *
 * @param a        first row's data
 * @param b        second row's data
 * @param cols     column positions to compare, in priority order
 * @param coltypes per-column type objects, indexed by column position
 * @return the first non-zero column comparison, or 0 when all compare equal
 */
public static int compareRows ( Object [ ] a , Object [ ] b , int [ ] cols , Type [ ] coltypes ) {
    for (int idx = 0; idx < cols.length; idx++) {
        final int col = cols[idx];
        final int order = coltypes[col].compare(a[col], b[col]);
        if (order != 0) {
            return order;
        }
    }
    return 0;
}
compares two full table rows based on a set of columns
32,317
/**
 * Counts the index's nodes by iterating all rows under the read lock.
 *
 * @return the number of rows reachable from the first row
 */
public int size ( PersistentStore store ) {
    readLock.lock();
    try {
        int total = 0;
        final RowIterator rows = firstRow(null, store);
        while (rows.hasNext()) {
            rows.getNextRow();
            ++total;
        }
        return total;
    } finally {
        readLock.unlock();
    }
}
Returns the node count .
32,318
// Inserts the row's node for this index into the AVL tree under the write
// lock. An empty tree just installs the node as the accessor; otherwise the
// tree is descended via compareRowForInsertOrDelete, throwing X_23505
// (unique constraint violation) when an equal key is found, and the tree is
// rebalanced from the attachment point afterwards.
public void insert ( Session session , PersistentStore store , Row row ) { NodeAVL n ; NodeAVL x ; boolean isleft = true ; int compare = - 1 ; writeLock . lock ( ) ; try { n = getAccessor ( store ) ; x = n ; if ( n == null ) { store . setAccessor ( this , ( ( RowAVL ) row ) . getNode ( position ) ) ; return ; } while ( true ) { Row currentRow = n . getRow ( store ) ; compare = compareRowForInsertOrDelete ( session , row , currentRow ) ; if ( compare == 0 ) { throw Error . error ( ErrorCode . X_23505 ) ; } isleft = compare < 0 ; x = n ; n = child ( store , x , isleft ) ; if ( n == null ) { break ; } } x = set ( store , x , isleft , ( ( RowAVL ) row ) . getNode ( position ) ) ; balance ( store , x , isleft ) ; } finally { writeLock . unlock ( ) ; } }
Insert a node into the index
32,319
/**
 * Positions an iterator on the first node equal to {@code rowdata} over the
 * first {@code match} columns; rowdata uses this index's column mapping.
 */
public RowIterator findFirstRow ( Session session , PersistentStore store , Object [ ] rowdata , int match ) {
    final NodeAVL found = findNode(session, store, rowdata, defaultColMap, match);
    return getIterator(session, store, found);
}
Return the first node equal to the indexdata object . The rowdata has the same column mapping as this index .
32,320
/**
 * Positions an iterator on the first node equal to {@code rowdata} over all
 * index columns; rowdata uses the table's column mapping.
 */
public RowIterator findFirstRow ( Session session , PersistentStore store , Object [ ] rowdata ) {
    final NodeAVL found = findNode(session, store, rowdata, colIndex, colIndex.length);
    return getIterator(session, store, found);
}
Return the first node equal to the rowdata object . The rowdata has the same column mapping as this table .
32,321
// Finds the first row whose first index column satisfies the given operator
// (GREATER, GREATER_EQUAL, EQUAL or IS_NULL) against value. SMALLER ops are
// delegated to findFirstRowNotNull; a null value with a non-equality op
// yields an empty iterator. The walk first descends to a boundary node
// (iTest selects strict vs. non-strict), then advances forward past
// non-matching and null entries, and finally skips rows the session cannot
// read under the transaction manager's visibility rules.
public RowIterator findFirstRow ( Session session , PersistentStore store , Object value , int compare ) { readLock . lock ( ) ; try { if ( compare == OpTypes . SMALLER || compare == OpTypes . SMALLER_EQUAL ) { return findFirstRowNotNull ( session , store ) ; } boolean isEqual = compare == OpTypes . EQUAL || compare == OpTypes . IS_NULL ; NodeAVL x = getAccessor ( store ) ; int iTest = 1 ; if ( compare == OpTypes . GREATER ) { iTest = 0 ; } if ( value == null && ! isEqual ) { return emptyIterator ; } boolean check = compare == OpTypes . GREATER || compare == OpTypes . EQUAL || compare == OpTypes . GREATER_EQUAL ; if ( ! check ) { Error . runtimeError ( ErrorCode . U_S0500 , "Index.findFirst" ) ; } while ( x != null ) { boolean t = colTypes [ 0 ] . compare ( value , x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ) >= iTest ; if ( t ) { NodeAVL r = x . getRight ( store ) ; if ( r == null ) { break ; } x = r ; } else { NodeAVL l = x . getLeft ( store ) ; if ( l == null ) { break ; } x = l ; } } while ( x != null ) { Object colvalue = x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ; int result = colTypes [ 0 ] . compare ( value , colvalue ) ; if ( result >= iTest ) { x = next ( store , x ) ; } else { if ( isEqual ) { if ( result != 0 ) { x = null ; } } else if ( colvalue == null ) { x = next ( store , x ) ; continue ; } break ; } } if ( session == null || x == null ) { return getIterator ( session , store , x ) ; } while ( x != null ) { Row row = x . getRow ( store ) ; if ( compare == OpTypes . EQUAL && colTypes [ 0 ] . compare ( value , row . getData ( ) [ colIndex [ 0 ] ] ) != 0 ) { x = null ; break ; } if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { readLock . unlock ( ) ; } }
Finds the first node that is larger or equal to the given one based on the first column of the index only .
32,322
// Returns an iterator on the first row whose first index column is not null
// and is visible to the session. Descends using compare(null, column) to
// find the null/non-null boundary, then walks forward past remaining null
// entries and past rows the session cannot read.
public RowIterator findFirstRowNotNull ( Session session , PersistentStore store ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; while ( x != null ) { boolean t = colTypes [ 0 ] . compare ( null , x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ) >= 0 ; if ( t ) { NodeAVL r = x . getRight ( store ) ; if ( r == null ) { break ; } x = r ; } else { NodeAVL l = x . getLeft ( store ) ; if ( l == null ) { break ; } x = l ; } } while ( x != null ) { Object colvalue = x . getRow ( store ) . getData ( ) [ colIndex [ 0 ] ] ; if ( colvalue == null ) { x = next ( store , x ) ; } else { break ; } } while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { readLock . unlock ( ) ; } }
Finds the first node where the data is not null .
32,323
// Returns an iterator on the leftmost (smallest) node, skipping rows the
// session cannot read. Side effect: records the length of the leftmost
// spine in the 'depth' field before releasing the read lock.
public RowIterator firstRow ( Session session , PersistentStore store ) { int tempDepth = 0 ; readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL l = x ; while ( l != null ) { x = l ; l = x . getLeft ( store ) ; tempDepth ++ ; } while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = next ( store , x ) ; } return getIterator ( session , store , x ) ; } finally { depth = tempDepth ; readLock . unlock ( ) ; } }
Returns the row for the first node of the index
32,324
// Returns the rightmost (largest) row visible to the session, walking
// backwards via last() past rows the session cannot read; null when the
// index is empty or nothing is visible.
public Row lastRow ( Session session , PersistentStore store ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL l = x ; while ( l != null ) { x = l ; l = x . getRight ( store ) ; } while ( session != null && x != null ) { Row row = x . getRow ( store ) ; if ( session . database . txManager . canRead ( session , row ) ) { break ; } x = last ( store , x ) ; } return x == null ? null : x . getRow ( store ) ; } finally { readLock . unlock ( ) ; } }
Returns the row for the last node of the index
32,325
/**
 * Advances past {@code x} to the next node visible to {@code session}
 * (any next node when the session is null); null at the end of the index.
 */
private NodeAVL next ( Session session , PersistentStore store , NodeAVL x ) {
    if (x == null) {
        return null;
    }
    readLock.lock();
    try {
        for (;;) {
            x = next(store, x);
            // End of index, or no visibility filtering requested.
            if (x == null || session == null) {
                return x;
            }
            final Row row = x.getRow(store);
            if (session.database.txManager.canRead(session, row)) {
                return x;
            }
        }
    } finally {
        readLock.unlock();
    }
}
Returns the node after the given one
32,326
/**
 * Replaces node {@code x} with {@code n}: either reinstalls the store's
 * root accessor (clearing n's parent) or redirects x's parent's child link.
 */
private void replace ( PersistentStore store , NodeAVL x , NodeAVL n ) {
    if (!x.isRoot()) {
        set(store, x.getParent(store), x.isFromLeft(store), n);
        return;
    }
    if (n != null) {
        n = n.setParent(store, null);
    }
    store.setAccessor(this, n);
}
Replace x with n
32,327
/**
 * Compares external row data {@code a} (index column order) against table
 * row data {@code b} (mapped through colIndex) over the first
 * {@code fieldcount} index columns.
 *
 * @return the first non-zero column comparison, or 0 when all compare equal
 */
public int compareRowNonUnique ( Object [ ] a , Object [ ] b , int fieldcount ) {
    for (int col = 0; col < fieldcount; col++) {
        final int order = colTypes[col].compare(a[col], b[colIndex[col]]);
        if (order != 0) {
            return order;
        }
    }
    return 0;
}
As above but use the index column data
32,328
// Ordering used when inserting into / deleting from a possibly-unique,
// possibly-descending index. First compares the index columns, honoring
// colDesc (descending) and nullsLast per column. On a full tie: a unique
// non-null match returns 0 only when the existing row is visible to the
// session; otherwise the primary-key columns break the tie, then the row
// position (sign-clamped to -1/0/1). NOTE(review): the first unique branch
// returns the raw position difference unclamped, unlike the later branches
// -- confirm whether that asymmetry (and its overflow exposure) is
// intentional.
private int compareRowForInsertOrDelete ( Session session , Row newRow , Row existingRow ) { Object [ ] a = newRow . getData ( ) ; Object [ ] b = existingRow . getData ( ) ; int j = 0 ; boolean hasNull = false ; for ( ; j < colIndex . length ; j ++ ) { Object currentvalue = a [ colIndex [ j ] ] ; Object othervalue = b [ colIndex [ j ] ] ; int i = colTypes [ j ] . compare ( currentvalue , othervalue ) ; boolean nulls = currentvalue == null || othervalue == null ; if ( i != 0 ) { if ( colDesc [ j ] && ! nulls ) { i = - i ; } if ( nullsLast [ j ] && nulls ) { i = - i ; } return i ; } if ( currentvalue == null ) { hasNull = true ; } } if ( isUnique && ! useRowId && ! hasNull ) { if ( session == null || session . database . txManager . canRead ( session , existingRow ) ) { return 0 ; } else { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; return difference ; } } for ( j = 0 ; j < pkCols . length ; j ++ ) { Object currentvalue = a [ pkCols [ j ] ] ; int i = pkTypes [ j ] . compare ( currentvalue , b [ pkCols [ j ] ] ) ; if ( i != 0 ) { return i ; } } if ( useRowId ) { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; if ( difference < 0 ) { difference = - 1 ; } else if ( difference > 0 ) { difference = 1 ; } return difference ; } if ( session == null || session . database . txManager . canRead ( session , existingRow ) ) { return 0 ; } else { int difference = newRow . getPos ( ) - existingRow . getPos ( ) ; if ( difference < 0 ) { difference = - 1 ; } else if ( difference > 0 ) { difference = 1 ; } return difference ; } }
Compare two rows of the table for inserting rows into unique indexes Supports descending columns .
32,329
// AVL lookup of rowdata (columns mapped through rowColMap) over the first
// fieldCount columns. Tracks the leftmost equal node while descending; when
// a session is given, advances past rows it cannot read, abandoning the
// result once equality no longer holds.
private NodeAVL findNode ( Session session , PersistentStore store , Object [ ] rowdata , int [ ] rowColMap , int fieldCount ) { readLock . lock ( ) ; try { NodeAVL x = getAccessor ( store ) ; NodeAVL n ; NodeAVL result = null ; while ( x != null ) { int i = this . compareRowNonUnique ( rowdata , rowColMap , x . getRow ( store ) . getData ( ) , fieldCount ) ; if ( i == 0 ) { result = x ; n = x . getLeft ( store ) ; } else if ( i > 0 ) { n = x . getRight ( store ) ; } else { n = x . getLeft ( store ) ; } if ( n == null ) { break ; } x = n ; } if ( session == null ) { return result ; } while ( result != null ) { Row row = result . getRow ( store ) ; if ( compareRowNonUnique ( rowdata , rowColMap , row . getData ( ) , fieldCount ) != 0 ) { result = null ; break ; } if ( session . database . txManager . canRead ( session , row ) ) { break ; } result = next ( store , result ) ; } return result ; } finally { readLock . unlock ( ) ; } }
Finds a match with a row from a different table
32,330
// AVL rebalancing walk upward after an insertion on the 'isleft' side of x.
// Per iteration (sign encodes the side): balance of 1*sign absorbs the
// insertion and stops; 0 tilts the node and continues to the parent;
// -1*sign triggers a single rotation (child leaning the same way) or a
// double rotation (child leaning the other way) via replace/set, with
// balance factors recomputed from the pivot, then stops.
private void balance ( PersistentStore store , NodeAVL x , boolean isleft ) { while ( true ) { int sign = isleft ? 1 : - 1 ; switch ( x . getBalance ( ) * sign ) { case 1 : x = x . setBalance ( store , 0 ) ; return ; case 0 : x = x . setBalance ( store , - sign ) ; break ; case - 1 : NodeAVL l = child ( store , x , isleft ) ; if ( l . getBalance ( ) == - sign ) { replace ( store , x , l ) ; x = set ( store , x , isleft , child ( store , l , ! isleft ) ) ; l = set ( store , l , ! isleft , x ) ; x = x . setBalance ( store , 0 ) ; l = l . setBalance ( store , 0 ) ; } else { NodeAVL r = child ( store , l , ! isleft ) ; replace ( store , x , r ) ; l = set ( store , l , ! isleft , child ( store , r , isleft ) ) ; r = set ( store , r , isleft , l ) ; x = set ( store , x , isleft , child ( store , r , ! isleft ) ) ; r = set ( store , r , ! isleft , x ) ; int rb = r . getBalance ( ) ; x = x . setBalance ( store , ( rb == - sign ) ? sign : 0 ) ; l = l . setBalance ( store , ( rb == sign ) ? - sign : 0 ) ; r = r . setBalance ( store , 0 ) ; } return ; } if ( x . isRoot ( ) ) { return ; } isleft = x . isFromLeft ( store ) ; x = x . getParent ( store ) ; } }
Balances part of the tree after an alteration to the index .
32,331
/**
 * Resolves a parsed column reference against the current table scan via
 * processTVE, then resolves every leaf TVE inside the resulting expression.
 *
 * @return the resolved expression
 */
public AbstractExpression resolveTVE ( TupleValueExpression tve ) {
    final AbstractExpression resolved = processTVE(tve, tve.getColumnName());
    for (TupleValueExpression leaf : ExpressionUtil.getTupleValueExpressions(resolved)) {
        resolveLeafTve(leaf);
    }
    return resolved;
}
The parameter tve is a column reference obtained by parsing a column ref VoltXML element . We need to find out to which column in the current table scan the name of the TVE refers and transfer metadata from the schema s column to the tve . The function processTVE does the transfer .
32,332
/**
 * Resolves the column index of every TVE in the output-column expressions
 * against the given input schema, assigning ascending differentiators to
 * the output columns along the way. Needed for inlined projection nodes
 * that have no child to supply an output schema.
 */
void resolveColumnIndexesUsingSchema ( NodeSchema inputSchema ) {
    int differentiator = 0;
    for (SchemaColumn col : m_outputSchema) {
        col.setDifferentiator(differentiator);
        differentiator++;
        for (TupleValueExpression tve
                : ExpressionUtil.getTupleValueExpressions(col.getExpression())) {
            tve.setColumnIndexUsingSchema(inputSchema);
            assert (tve.getColumnIndex() >= 0
                    && tve.getColumnIndex() < inputSchema.size());
        }
    }
}
Given an input schema resolve all the TVEs in all the output column expressions . This method is necessary to be able to do this for inlined projection nodes that don t have a child from which they can get an output schema .
32,333
// True when this node's output schema is a pure pass-through of the child's
// true output schema: same size, matching value types, and both columns at
// each position are TVEs with this node's TVE indexing its own position --
// i.e. the node is unneeded given that input schema.
public boolean isIdentity ( AbstractPlanNode childNode ) throws PlanningErrorException { assert ( childNode != null ) ; NodeSchema childSchema = childNode . getTrueOutputSchema ( false ) ; assert ( childSchema != null ) ; NodeSchema outputSchema = getOutputSchema ( ) ; if ( outputSchema . size ( ) != childSchema . size ( ) ) { return false ; } for ( int idx = 0 ; idx < outputSchema . size ( ) ; idx += 1 ) { SchemaColumn col = outputSchema . getColumn ( idx ) ; SchemaColumn childCol = childSchema . getColumn ( idx ) ; if ( col . getValueType ( ) != childCol . getValueType ( ) ) { return false ; } if ( ! ( col . getExpression ( ) instanceof TupleValueExpression ) ) { return false ; } if ( ! ( childCol . getExpression ( ) instanceof TupleValueExpression ) ) { return false ; } TupleValueExpression tve = ( TupleValueExpression ) col . getExpression ( ) ; if ( tve . getColumnIndex ( ) != idx ) { return false ; } } return true ; }
Return true if this node unneeded if its input schema is the given one .
32,334
// Copies this node's output column names/aliases (table and column) onto
// the child's true output schema, column by column. Used when deleting an
// unnecessary projection node; asserts the schemas already agree in size,
// value types and TVE-ness, so only the names need changing.
public void replaceChildOutputSchemaNames ( AbstractPlanNode child ) { NodeSchema childSchema = child . getTrueOutputSchema ( false ) ; NodeSchema mySchema = getOutputSchema ( ) ; assert ( childSchema . size ( ) == mySchema . size ( ) ) ; for ( int idx = 0 ; idx < childSchema . size ( ) ; idx += 1 ) { SchemaColumn cCol = childSchema . getColumn ( idx ) ; SchemaColumn myCol = mySchema . getColumn ( idx ) ; assert ( cCol . getValueType ( ) == myCol . getValueType ( ) ) ; assert ( cCol . getExpression ( ) instanceof TupleValueExpression ) ; assert ( myCol . getExpression ( ) instanceof TupleValueExpression ) ; cCol . reset ( myCol . getTableName ( ) , myCol . getTableAlias ( ) , myCol . getColumnName ( ) , myCol . getColumnAlias ( ) ) ; } }
Replace the column names output schema of the child node with the output schema column names of this node . We use this when we delete an unnecessary projection node . We only need to make sure the column names are changed since we will have checked carefully that everything else is the same .
32,335
/**
 * Forwards {@code msg} to the local repair log.
 * Must be invoked on the owning task thread (asserted).
 */
void deliverToRepairLog ( VoltMessage msg ) {
    // Only the task thread may touch the repair log.
    assert (Thread.currentThread().getId() == m_taskThreadId);
    m_repairLog.deliver(msg);
}
when the MpScheduler needs to log the completion of a transaction to its local repair log
32,336
/**
 * Stamps {@code message} with this mailbox's HSId as its source and hands
 * it to the messenger for delivery to {@code destHSId}.
 */
private void sendInternal ( long destHSId , VoltMessage message ) {
    message.m_sourceHSId = getHSId();
    m_messenger.send(destHSId, message);
}
Send a message so that there is a serialized order to all hosts .
32,337
/**
 * Computes the per-counter delta between two snapshots of the same partition.
 *
 * @param newer the more recent snapshot
 * @param older the earlier snapshot
 * @return a new instance holding newer-minus-older counter values
 * @throws IllegalArgumentException if the snapshots cover different partitions
 */
public static ClientAffinityStats diff ( ClientAffinityStats newer , ClientAffinityStats older ) {
    if (newer.m_partitionId != older.m_partitionId) {
        throw new IllegalArgumentException("Can't diff these ClientAffinityStats instances.");
    }
    return new ClientAffinityStats(
            older.m_partitionId,
            newer.m_affinityWrites - older.m_affinityWrites,
            newer.m_rrWrites - older.m_rrWrites,
            newer.m_affinityReads - older.m_affinityReads,
            newer.m_rrReads - older.m_rrReads);
}
Subtract one ClientAffinityStats instance from another to produce a third .
32,338
// Drains encrypted frames until one complete message has been assembled:
// non-final frames accumulate in m_partialMessages (guarded by its own
// monitor); the final frame first flushes any accumulated partials (after
// asserting the piece count matches) and then itself into
// m_encryptedMessages. Returns the accumulated byte-count delta between
// original and encrypted sizes, or -1 when no complete message was built.
private int addFramesForCompleteMessage ( ) { boolean added = false ; EncryptFrame frame = null ; int delta = 0 ; while ( ! added && ( frame = m_encryptedFrames . poll ( ) ) != null ) { if ( ! frame . isLast ( ) ) { synchronized ( m_partialMessages ) { m_partialMessages . add ( frame ) ; ++ m_partialSize ; } continue ; } final int partialSize = m_partialSize ; if ( partialSize > 0 ) { assert frame . chunks == partialSize + 1 : "partial frame buildup has wrong number of preceding pieces" ; synchronized ( m_partialMessages ) { for ( EncryptFrame frm : m_partialMessages ) { m_encryptedMessages . addComponent ( true , frm . frame ) ; delta += frm . delta ; } m_partialMessages . clear ( ) ; m_partialSize = 0 ; } } m_encryptedMessages . addComponent ( true , frame . frame ) ; delta += frame . delta ; m_numEncryptedMessages += frame . msgs ; added = true ; } return added ? delta : - 1 ; }
Gather all the frames that comprise a whole Volt Message Returns the delta between the original message byte count and encrypted message byte count .
32,339
// Best-effort teardown (caller holds the synchronizing lock, per the
// original note): marks shutdown, waits up to a bounded number of seconds
// for in-flight encryption permits to drain, kills the encrypt gateway,
// then releases every queued frame buffer, every partial-message buffer and
// (if still referenced) the composite encrypted-messages buffer. Finally
// resets m_inFlight to exactly one available permit.
void shutdown ( ) { m_isShutdown = true ; try { int waitFor = 1 - Math . min ( m_inFlight . availablePermits ( ) , - 4 ) ; for ( int i = 0 ; i < waitFor ; ++ i ) { try { if ( m_inFlight . tryAcquire ( 1 , TimeUnit . SECONDS ) ) { m_inFlight . release ( ) ; break ; } } catch ( InterruptedException e ) { break ; } } m_ecryptgw . die ( ) ; EncryptFrame frame = null ; while ( ( frame = m_encryptedFrames . poll ( ) ) != null ) { frame . frame . release ( ) ; } for ( EncryptFrame ef : m_partialMessages ) { ef . frame . release ( ) ; } m_partialMessages . clear ( ) ; if ( m_encryptedMessages . refCnt ( ) > 0 ) m_encryptedMessages . release ( ) ; } finally { m_inFlight . drainPermits ( ) ; m_inFlight . release ( ) ; } }
Called from synchronized block only
32,340
/**
 * Builds the completion task that unregisters {@code mb} from the host
 * messenger once all data targets are done.
 */
private Runnable createCompletionTask ( final Mailbox mb ) {
    return new Runnable() {
        public void run() {
            VoltDB.instance().getHostMessenger().removeMailbox(mb.getHSId());
        }
    };
}
Remove the mailbox from the host messenger after all data targets are done .
32,341
// Delegates truncation-snapshot setup to an inner NativeSnapshotWritePlan
// configured to cover all tables (SnapshotRequestConfig built from the new
// partition count and the catalog database), merges the inner plan's
// per-HSId task lists into this plan eagerly, and wraps the deferred setup
// callable so the inner plan's snapshot targets are adopted only after the
// setup actually runs.
private Callable < Boolean > coalesceTruncationSnapshotPlan ( String file_path , String pathType , String file_nonce , long txnId , Map < Integer , Long > partitionTransactionIds , SystemProcedureExecutionContext context , VoltTable result , ExtensibleSnapshotDigestData extraSnapshotData , SiteTracker tracker , HashinatorSnapshotData hashinatorData , long timestamp , int newPartitionCount ) { final NativeSnapshotWritePlan plan = new NativeSnapshotWritePlan ( ) ; final Callable < Boolean > deferredTruncationSetup = plan . createSetupInternal ( file_path , pathType , file_nonce , txnId , partitionTransactionIds , new SnapshotRequestConfig ( newPartitionCount , context . getDatabase ( ) ) , context , result , extraSnapshotData , tracker , hashinatorData , timestamp ) ; m_taskListsForHSIds . putAll ( plan . m_taskListsForHSIds ) ; return new Callable < Boolean > ( ) { public Boolean call ( ) throws Exception { final Boolean retval = deferredTruncationSetup . call ( ) ; m_targets . addAll ( plan . m_targets ) ; return retval ; } } ; }
NativeSnapshotWritePlan to include all tables .
32,342
/**
 * Test-only teardown: forcefully closes this foreign host's socket, pausing
 * briefly between steps so the peer observes the hard disconnect.
 */
void killSocket ( ) {
    try {
        m_closing = true;
        // SO_LINGER(false, 0) forces an abortive close rather than a
        // graceful FIN handshake.
        m_socket.setKeepAlive(false);
        m_socket.setSoLinger(false, 0);
        Thread.sleep(25);
        m_socket.close();
        Thread.sleep(25);
        System.gc();
        Thread.sleep(25);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Used only for test code to kill this FH
32,343
// Re-entrant send: serializes the message (length, source HSId, destination
// list, payload) onto the network unless the connection is down, the
// destination list is empty, or the test-only link cut is engaged. After
// enqueueing, performs dead-host detection on the primary connection: logs
// (rate-limited) when nothing has been received within m_logRate, and after
// m_deadHostTimeout of silence logs diagnostics once and reports the
// foreign host failed to the host messenger.
void send ( final long destinations [ ] , final VoltMessage message ) { if ( ! m_isUp ) { hostLog . warn ( "Failed to send VoltMessage because connection to host " + CoreUtils . getHostIdFromHSId ( destinations [ 0 ] ) + " is closed" ) ; return ; } if ( destinations . length == 0 ) { return ; } if ( ! m_linkCutForTest . get ( ) ) { m_network . enqueue ( new DeferredSerialization ( ) { public final void serialize ( final ByteBuffer buf ) throws IOException { buf . putInt ( buf . capacity ( ) - 4 ) ; buf . putLong ( message . m_sourceHSId ) ; buf . putInt ( destinations . length ) ; for ( int ii = 0 ; ii < destinations . length ; ii ++ ) { buf . putLong ( destinations [ ii ] ) ; } message . flattenToBuffer ( buf ) ; buf . flip ( ) ; } public final void cancel ( ) { } public String toString ( ) { return message . getClass ( ) . getName ( ) ; } public int getSerializedSize ( ) { final int len = 4 + 8 + 4 + 8 * destinations . length + message . getSerializedSize ( ) ; return len ; } } ) ; } long current_time = EstTime . currentTimeMillis ( ) ; long current_delta = current_time - m_lastMessageMillis . get ( ) ; if ( isPrimary ( ) && current_delta > m_logRate ) { rateLimitedLogger . log ( "Have not received a message from host " + hostnameAndIPAndPort ( ) + " for " + ( current_delta / 1000.0 ) + " seconds" , current_time ) ; } if ( ( ! m_closing && m_isUp ) && isPrimary ( ) && current_delta > m_deadHostTimeout ) { if ( m_deadReportsCount . getAndIncrement ( ) == 0 ) { hostLog . error ( "DEAD HOST DETECTED, hostname: " + hostnameAndIPAndPort ( ) ) ; hostLog . info ( "\tcurrent time: " + current_time ) ; hostLog . info ( "\tlast message: " + m_lastMessageMillis ) ; hostLog . info ( "\tdelta (millis): " + current_delta ) ; hostLog . info ( "\ttimeout value (millis): " + m_deadHostTimeout ) ; VoltDB . dropStackTrace ( "Timed out foreign host " + hostnameAndIPAndPort ( ) + " with delta " + current_delta ) ; } m_hostMessenger . reportForeignHostFailed ( m_hostId ) ; } }
Send a message to the network . This public method is re - entrant .
32,344
/**
 * Deliver a deserialized message from the network to a local mailbox.
 *
 * Messages from hosts already known to have failed are dropped.  A message
 * addressed to an unknown site on this host is logged and dropped; a message
 * addressed to a different host entirely is treated as a fatal routing error.
 *
 * @param destinationHSId HSId of the local mailbox the message targets
 * @param message         the already-deserialized message
 */
private void deliverMessage ( long destinationHSId , VoltMessage message ) {
    // Drop traffic from hosts the messenger already considers failed.
    if ( ! m_hostMessenger . validateForeignHostId ( m_hostId ) ) {
        hostLog . warn ( String . format ( "Message (%s) sent to site id: %s @ (%s) at %d from %s " + "which is a known failed host. The message will be dropped\n" ,
                message . getClass ( ) . getSimpleName ( ) ,
                CoreUtils . hsIdToString ( destinationHSId ) ,
                m_socket . getRemoteSocketAddress ( ) . toString ( ) ,
                m_hostMessenger . getHostId ( ) ,
                CoreUtils . hsIdToString ( message . m_sourceHSId ) ) ) ;
        return ;
    }
    Mailbox mailbox = m_hostMessenger . getMailbox ( destinationHSId ) ;
    if ( mailbox == null ) {
        hostLog . info ( String . format ( "Message (%s) sent to unknown site id: %s @ (%s) at %d from %s \n" ,
                message . getClass ( ) . getSimpleName ( ) ,
                CoreUtils . hsIdToString ( destinationHSId ) ,
                m_socket . getRemoteSocketAddress ( ) . toString ( ) ,
                m_hostMessenger . getHostId ( ) ,
                CoreUtils . hsIdToString ( message . m_sourceHSId ) ) ) ;
        // The low bits of an HSId encode the host id; a mismatch means the
        // message was routed to the wrong host entirely -- fatal.
        if ( m_hostMessenger . getHostId ( ) != ( int ) destinationHSId ) {
            VoltDB . crashLocalVoltDB ( "Received a message at wrong host" , false , null ) ;
        }
        return ;
    }
    mailbox . deliver ( message ) ;
}
Deliver a deserialized message from the network to a local mailbox
32,345
/**
 * Read data from the network.  Runs in the context of the PicoNetwork thread
 * when data is available.
 *
 * Wire layout: source HSId (long), destination count (int).  Two sentinel
 * destination counts select special handling: POISON_PILL (remote fatal
 * error / crash request) and STOPNODE_NOTICE.  Otherwise the destination
 * HSIds follow, then the serialized message payload.
 *
 * @param in buffer positioned at the start of one complete message
 * @param c  the network connection the data arrived on (unused here)
 * @throws IOException on deserialization failure
 */
private void handleRead ( ByteBuffer in , Connection c ) throws IOException {
    long recvDests [ ] = null ;
    final long sourceHSId = in . getLong ( ) ;
    final int destCount = in . getInt ( ) ;
    if ( destCount == POISON_PILL ) {
        // Ignore poison pills while already shutting down.
        if ( VoltDB . instance ( ) . getMode ( ) == OperationMode . SHUTTINGDOWN ) {
            return ;
        }
        // Payload: UTF-8 error text (length-prefixed) then a cause code.
        byte messageBytes [ ] = new byte [ in . getInt ( ) ] ;
        in . get ( messageBytes ) ;
        String message = new String ( messageBytes , "UTF-8" ) ;
        message = String . format ( "Fatal error from id,hostname(%d,%s): %s" , m_hostId , hostnameAndIPAndPort ( ) , message ) ;
        int cause = in . getInt ( ) ;
        if ( cause == ForeignHost . CRASH_ME ) {
            // Remote asked specifically this node to halt.
            int hid = VoltDB . instance ( ) . getHostMessenger ( ) . getHostId ( ) ;
            hostLog . debug ( "Poison Pill with target me was sent.: " + hid ) ;
            VoltDB . instance ( ) . halt ( ) ;
        } else if ( cause == ForeignHost . CRASH_ALL || cause == ForeignHost . CRASH_SPECIFIED ) {
            org . voltdb . VoltDB . crashLocalVoltDB ( message , false , null ) ;
        } else if ( cause == ForeignHost . PRINT_STACKTRACE ) {
            // Diagnostic request: dump all thread stacks to a .jstack file.
            String dumpDir = new File ( VoltDB . instance ( ) . getVoltDBRootPath ( ) , "thread_dumps" ) . getAbsolutePath ( ) ;
            String fileName = m_hostMessenger . getHostname ( ) + "_host-" + m_hostId + "_" + System . currentTimeMillis ( ) + ".jstack" ;
            VoltDB . dumpThreadTraceToFile ( dumpDir , fileName ) ;
        } else {
            hostLog . error ( "Invalid Cause in poison pill: " + cause ) ;
        }
        return ;
    } else if ( destCount == STOPNODE_NOTICE ) {
        int targetHostId = in . getInt ( ) ;
        hostLog . info ( "Receive StopNode notice for host " + targetHostId ) ;
        m_hostMessenger . addStopNodeNotice ( targetHostId ) ;
        return ;
    }
    // Normal message: read destination HSIds then deserialize the payload.
    recvDests = new long [ destCount ] ;
    for ( int i = 0 ; i < destCount ; i ++ ) {
        recvDests [ i ] = in . getLong ( ) ;
    }
    final VoltMessage message = m_hostMessenger . getMessageFactory ( ) . createMessageFromBuffer ( in , sourceHSId ) ;
    // Relay site-failure information to the messenger (forwarded copies are
    // excluded, presumably to avoid re-relaying -- confirm against sender).
    if ( message instanceof SiteFailureMessage && ! ( message instanceof SiteFailureForwardMessage ) ) {
        SiteFailureMessage sfm = ( SiteFailureMessage ) message ;
        for ( FaultMessage fm : sfm . asFaultMessages ( ) ) {
            m_hostMessenger . relayForeignHostFailed ( fm ) ;
        }
    }
    // One deserialized instance is delivered to every destination mailbox.
    for ( int i = 0 ; i < destCount ; i ++ ) {
        deliverMessage ( recvDests [ i ] , message ) ;
    }
    // Record receive time for dead-host detection (lazySet: exact ordering
    // is not required, only eventual visibility).
    m_lastMessageMillis . lazySet ( EstTime . currentTimeMillis ( ) ) ;
}
Read data from the network . Runs in the context of PicoNetwork thread when data is available .
32,346
/**
 * Returns the first node in the tree that is in range, or null if no such
 * node exists.
 *
 * With a lower bound, seeks the ceiling of the endpoint and steps past it
 * when the bound is OPEN and the node equals the endpoint.  Without a lower
 * bound, starts at the successor of the header sentinel (the overall
 * smallest element).  The final check also enforces any upper bound.
 */
private AvlNode < E > firstNode ( ) {
    AvlNode < E > root = rootReference . get ( ) ;
    if ( root == null ) {
        return null ;    // empty multiset
    }
    AvlNode < E > node ;
    if ( range . hasLowerBound ( ) ) {
        E endpoint = range . getLowerEndpoint ( ) ;
        node = rootReference . get ( ) . ceiling ( comparator ( ) , endpoint ) ;
        if ( node == null ) {
            return null ;    // everything is below the lower bound
        }
        // OPEN bound excludes the endpoint itself.
        if ( range . getLowerBoundType ( ) == BoundType . OPEN && comparator ( ) . compare ( endpoint , node . getElement ( ) ) == 0 ) {
            node = node . succ ;
        }
    } else {
        node = header . succ ;
    }
    // header means we walked off the end; also reject out-of-range nodes.
    return ( node == header || ! range . contains ( node . getElement ( ) ) ) ? null : node ;
}
Returns the first node in the tree that is in range .
32,347
/**
 * Creates a new media type with the given type and subtype and no
 * parameters.
 *
 * @param type    the top-level media type (e.g. "text")
 * @param subtype the media subtype (e.g. "plain")
 * @return the new parameterless media type
 */
public static MediaType create ( String type , String subtype ) {
    ImmutableListMultimap < String , String > noParameters = ImmutableListMultimap . of ( ) ;
    return create ( type , subtype , noParameters ) ;
}
Creates a new media type with the given type and subtype .
32,348
/**
 * Compute the serialized parameter-set size in bytes for an ApplyBinaryLog
 * call, calculated based on BinaryLogHelper and ParameterSet.fromArrayNoCopy.
 *
 * NOTE(review): each term below mirrors one field written by those
 * serializers (type byte + payload); the exact field meanings are not
 * verifiable from this method alone -- confirm against the serializers
 * before changing any constant.
 *
 * @param streamCount          number of binary-log streams
 * @param remotePartitionCount remote partitions per stream
 * @param concatLogSize        total bytes of the concatenated logs
 * @return total serialized size in bytes
 */
private static int getSerializedParamSizeForApplyBinaryLog ( int streamCount , int remotePartitionCount , int concatLogSize ) {
    int serializedParamSize = 2 + 1 + 4 + 1 + 4 + 1 + 4 + 4            // fixed-size header fields
            + ( 4 + 8 * remotePartitionCount ) * streamCount           // per-stream array of 8-byte values, one per remote partition
            + 1 + 4 + 4
            + ( 4 + 8 + 8 + 4 + 4 + 16 ) * streamCount                 // per-stream fixed-size record
            + 1 + 4 + 4
            + 4 * streamCount + concatLogSize                          // per-stream 4-byte lengths plus concatenated log payload
            + 1 + 1 + 1 + 4 ;                                          // trailing fields
    return serializedParamSize ;
}
calculate based on BinaryLogHelper and ParameterSet . fromArrayNoCopy
32,349
/**
 * Used to reset the internal state of this transaction so it can be
 * successfully restarted.
 */
void restart ( ) {
    setNeedsRollback ( false ) ;
    m_haveDistributedInitTask = false ;
    m_isRestart = true ;               // mark this transaction as a restart
    m_haveSentfragment = false ;
    m_drBufferChangedAgg = 0 ;         // reset the DR buffer-change aggregate -- TODO confirm units
}
Used to reset the internal state of this transaction so it can be successfully restarted
32,350
/**
 * Overrides needed by MpProcedureRunner: clear per-batch fragment state so
 * the procedure can issue its next batch.
 *
 * @param dependencies dependency ids for the resumed batch; currently
 *                     unused by this implementation -- TODO confirm the MP
 *                     path tracks dependencies elsewhere
 */
public void setupProcedureResume ( int [ ] dependencies ) {
    m_localWork = null ;
    m_remoteWork = null ;
    m_remoteDeps = null ;
    m_remoteDepTables . clear ( ) ;
}
Overrides needed by MpProcedureRunner
32,351
/**
 * Convenience overload of {@link #setupProcedureResume(int[])} accepting
 * the dependency ids as a List of boxed Integers.
 *
 * @param deps dependency ids for the resumed batch
 */
public void setupProcedureResume ( List < Integer > deps ) {
    int [ ] unboxed = com . google_voltpatches . common . primitives . Ints . toArray ( deps ) ;
    setupProcedureResume ( unboxed ) ;
}
Convenience overload of setupProcedureResume that accepts the dependency ids as a List of Integers .
32,352
/**
 * Restart this fragment after it was mis-routed during a
 * MigratePartitionLeader operation, re-sending it to the partition's
 * current master.
 *
 * @param message             the response indicating the mis-route
 * @param masters             current master HSIds
 * @param partitionMastersMap partition id -> master HSId
 */
public void restartFragment ( FragmentResponseMessage message , List < Long > masters , Map < Integer , Long > partitionMastersMap ) {
    final int partionId = message . getPartitionId ( ) ;
    Long restartHsid = partitionMastersMap . get ( partionId ) ;
    Long hsid = message . getExecutorSiteId ( ) ;
    // Master moved (or is unknown): remember the old->new mapping and
    // refresh our view of the masters.  equals() tolerates a null restartHsid.
    if ( ! hsid . equals ( restartHsid ) ) {
        m_masterMapForFragmentRestart . clear ( ) ;
        m_masterMapForFragmentRestart . put ( restartHsid , hsid ) ;
        updateMasters ( masters , partitionMastersMap ) ;
    }
    // No new master known yet: fall back to the old executor site.
    if ( restartHsid == null ) {
        restartHsid = hsid ;
    }
    if ( tmLog . isDebugEnabled ( ) ) {
        tmLog . debug ( "Rerouted fragment from " + CoreUtils . hsIdToString ( hsid ) + " to " + CoreUtils . hsIdToString ( restartHsid ) + "\n" + m_remoteWork ) ;
    }
    m_fragmentRestarted = true ;
    m_mbox . send ( restartHsid , m_remoteWork ) ;
}
Restart this fragment after it was mis - routed during a MigratePartitionLeader operation . If the masters have been updated , the fragment is routed to its new master ; otherwise it is re - sent to the old master until the new master is known .
32,353
/**
 * Check whether a unique index from the old catalog covers the new index.
 * If so, adding the new index cannot fail with a duplicate key.
 *
 * @param newIndex the index being added
 * @return true if some pre-existing index on the same table covers it
 */
private boolean checkNewUniqueIndex ( Index newIndex ) {
    Table owningTable = ( Table ) newIndex . getParent ( ) ;
    CatalogMap < Index > priorIndexes = m_originalIndexesByTable . get ( owningTable . getTypeName ( ) ) ;
    for ( Index prior : priorIndexes ) {
        if ( indexCovers ( newIndex , prior ) ) {
            return true ;
        }
    }
    return false ;
}
Check if there is a unique index that exists in the old catalog that is covered by the new index . That would mean adding this index can t fail with a duplicate key .
32,354
/**
 * Build an error message saying the named view cannot be created because
 * its definition uses operations that cannot always be applied to
 * non-empty source tables.
 *
 * @param viewName        name of the view being rejected
 * @param singleTableName the single source table's name, or null for a
 *                        multi-table view
 * @return the formatted error message
 */
private String createViewDisallowedMessage ( String viewName , String singleTableName ) {
    boolean singleTable = ( singleTableName != null ) ;
    String viewKind = singleTable ? "single table " : "multi-table " ;
    String tableClause = singleTable ? String . format ( "on table %s " , singleTableName ) : "" ;
    String condition = singleTable ? "the table already contains data" : "none of the source tables are empty" ;
    return String . format ( "Unable to create %sview %s %sbecause the view definition uses operations that cannot always be applied if %s." ,
            viewKind , viewName , tableClause , condition ) ;
}
Return an error message asserting that we cannot create a view with a given name .
32,355
/**
 * Check a MaterializedViewHandlerInfo object for safety with non-empty
 * source tables.  If unsafe, return a requirements object naming every
 * source table (one of which must be empty for the view to be created)
 * along with an error message; return null if the view is always safe.
 *
 * @param mvh the view handler info to check
 * @return population requirements, or null when no restriction applies
 */
private TablePopulationRequirements getMVHandlerInfoMessage ( MaterializedViewHandlerInfo mvh ) {
    if ( mvh . getIssafewithnonemptysources ( ) ) {
        return null ;    // safe regardless of source table contents
    }
    String viewName = mvh . getDesttable ( ) . getTypeName ( ) ;
    TablePopulationRequirements requirements = new TablePopulationRequirements ( viewName ) ;
    requirements . setErrorMessage ( createViewDisallowedMessage ( viewName , null ) ) ;
    for ( TableRef tref : mvh . getSourcetables ( ) ) {
        requirements . addTableName ( tref . getTable ( ) . getTypeName ( ) ) ;
    }
    return requirements ;
}
Check a MaterializedViewHandlerInfo object for safety . Return an object with table population requirements on the table for it to be allowed . The return object if it is non - null will have a set of names of tables one of which must be empty if the view can be created . It will also have an error message .
32,356
/**
 * Add a modification to the catalog diff: validate it against the
 * ignore/whitelist rules, emit the serialized command, and record it in
 * the change group for reporting.
 *
 * @param newType  the modified catalog node
 * @param prevType the corresponding node from the previous catalog
 * @param field    name of the modified field
 */
private void writeModification ( CatalogType newType , CatalogType prevType , String field ) {
    // Some modifications are expected and benign; skip them entirely.
    if ( checkModifyIgnoreList ( newType , prevType , field ) ) {
        return ;
    }
    // Null error message means the modification is unconditionally allowed.
    String errorMessage = checkModifyWhitelist ( newType , prevType , field ) ;
    if ( errorMessage != null ) {
        // Otherwise it may still be allowed if certain tables are empty.
        List < TablePopulationRequirements > responseList = checkModifyIfTableIsEmptyWhitelist ( newType , prevType , field ) ;
        processModifyResponses ( errorMessage , responseList ) ;
    }
    if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( newType ) ) {
        m_requiresCatalogDiffCmdsApplyToEE = true ;
    }
    m_serializer . writeCommandForField ( newType , field , true ) ;
    // A Database "schema" change is not tracked in the change groups.
    if ( ( newType instanceof Database ) && field . equals ( "schema" ) ) {
        return ;
    }
    CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( newType ) ) ;
    cgrp . processChange ( newType , prevType , field ) ;
}
Add a modification
32,357
/**
 * Decide whether a catalog diff command for the given node must also be
 * applied to the EE.  The EE only mirrors a subset of the Java catalog;
 * diff commands for everything else are Java-only.
 *
 * @param suspect the catalog node being changed
 * @return true if the EE must see the corresponding diff command
 */
protected static boolean checkCatalogDiffShouldApplyToEE ( final CatalogType suspect ) {
    // Top-level containers and functions always reach the EE.
    if ( suspect instanceof Cluster || suspect instanceof Database || suspect instanceof Function ) {
        return true ;
    }
    // All table-related schema objects are mirrored in the EE catalog.
    boolean tableRelated = suspect instanceof Table || suspect instanceof TableRef
            || suspect instanceof Column || suspect instanceof ColumnRef
            || suspect instanceof Index || suspect instanceof IndexRef
            || suspect instanceof Constraint || suspect instanceof ConstraintRef
            || suspect instanceof MaterializedViewInfo
            || suspect instanceof MaterializedViewHandlerInfo ;
    if ( tableRelated ) {
        return true ;
    }
    // Statements matter to the EE only when not owned by a procedure.
    if ( suspect instanceof Statement ) {
        return ! ( suspect . getParent ( ) instanceof Procedure ) ;
    }
    // Likewise for plan fragments of non-procedure statements.
    if ( suspect instanceof PlanFragment && suspect . getParent ( ) instanceof Statement ) {
        return ! ( suspect . getParent ( ) . getParent ( ) instanceof Procedure ) ;
    }
    // Export connector configuration is applied in the EE as well.
    return suspect instanceof Connector
            || suspect instanceof ConnectorProperty
            || suspect instanceof ConnectorTableInfo ;
}
Our EE has a list of Catalog items that are in use , but the Java catalog contains much more . Some of the catalog diff commands are only useful to Java , so this function decides whether a given catalog diff command must also be applied to the EE .
32,358
/**
 * After we decide we can't modify, add, or delete something on a full
 * table, process the result of the empty-table fallback check.
 *
 * A null responseList means the change is not possible at all: record the
 * error and mark the diff unsupported.  Otherwise each response names an
 * object and the tables that must be empty; requirements for the same
 * object are merged by concatenating their error messages.  An empty
 * responseList means no tables must be empty, which is totally fine.
 *
 * @param errorMessage the error to report when the change is impossible
 * @param responseList empty-table requirements, or null if impossible
 */
private void processModifyResponses ( String errorMessage , List < TablePopulationRequirements > responseList ) {
    assert ( errorMessage != null ) ;
    if ( responseList == null ) {
        m_supported = false ;
        m_errors . append ( errorMessage + "\n" ) ;
        return ;
    }
    for ( TablePopulationRequirements response : responseList ) {
        String objectName = response . getObjectName ( ) ;
        String nonEmptyErrorMessage = response . getErrorMessage ( ) ;
        assert ( nonEmptyErrorMessage != null ) ;
        TablePopulationRequirements popreq = m_tablesThatMustBeEmpty . get ( objectName ) ;
        if ( popreq == null ) {
            popreq = response ;
            m_tablesThatMustBeEmpty . put ( objectName , popreq ) ;
        } else {
            // Merge: append this requirement's message to the existing one.
            String newErrorMessage = popreq . getErrorMessage ( ) + "\n " + response . getErrorMessage ( ) ;
            popreq . setErrorMessage ( newErrorMessage ) ;
        }
    }
}
After we decide we can t modify add or delete something on a full table we do a check to see if we can do that on an empty table . The original error and any response from the empty table check is processed here . This code is basically in this method so it s not repeated 3 times for modify add and delete . See where it s called for context . If the responseList equals null it is not possible to modify otherwise we do the check described above for every element in the responseList if there is no element in the responseList it means no tables must be empty which is totally fine .
32,359
/**
 * Add a deletion to the catalog diff: validate against ignore/whitelist
 * rules, emit the delete command, and record it in the change group.
 *
 * @param prevType             the node being removed
 * @param newlyChildlessParent its parent in the new catalog
 * @param mapName              name of the catalog map it is removed from
 */
private void writeDeletion ( CatalogType prevType , CatalogType newlyChildlessParent , String mapName ) {
    // Some deletions are expected and benign; skip them entirely.
    if ( checkDeleteIgnoreList ( prevType , newlyChildlessParent , mapName , prevType . getTypeName ( ) ) ) {
        return ;
    }
    // Null error message means the deletion is unconditionally allowed.
    String errorMessage = checkAddDropWhitelist ( prevType , ChangeType . DELETION ) ;
    if ( errorMessage != null ) {
        // Otherwise it may still be allowed if certain tables are empty.
        TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist ( prevType , ChangeType . DELETION ) ;
        List < TablePopulationRequirements > responseList = null ;
        if ( response != null ) {
            responseList = Collections . singletonList ( response ) ;
        }
        processModifyResponses ( errorMessage , responseList ) ;
    }
    if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( prevType ) ) {
        m_requiresCatalogDiffCmdsApplyToEE = true ;
    }
    m_serializer . writeDeleteDiffStatement ( prevType , mapName ) ;
    CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( prevType ) ) ;
    cgrp . processDeletion ( prevType , newlyChildlessParent ) ;
}
Add a deletion
32,360
/**
 * Add an addition to the catalog diff: validate against ignore/whitelist
 * rules, serialize the new node, and record it in the change group.
 *
 * @param newType the node being added
 */
private void writeAddition ( CatalogType newType ) {
    // Some additions are expected and benign; skip them entirely.
    if ( checkAddIgnoreList ( newType ) ) {
        return ;
    }
    // Null error message means the addition is unconditionally allowed.
    String errorMessage = checkAddDropWhitelist ( newType , ChangeType . ADDITION ) ;
    if ( errorMessage != null ) {
        // Otherwise it may still be allowed if certain tables are empty.
        TablePopulationRequirements response = checkAddDropIfTableIsEmptyWhitelist ( newType , ChangeType . ADDITION ) ;
        List < TablePopulationRequirements > responseList = null ;
        if ( response != null ) {
            responseList = Collections . singletonList ( response ) ;
        }
        processModifyResponses ( errorMessage , responseList ) ;
    }
    if ( ! m_requiresCatalogDiffCmdsApplyToEE && checkCatalogDiffShouldApplyToEE ( newType ) ) {
        m_requiresCatalogDiffCmdsApplyToEE = true ;
    }
    newType . accept ( m_serializer ) ;
    CatalogChangeGroup cgrp = m_changes . get ( DiffClass . get ( newType ) ) ;
    cgrp . processAddition ( newType ) ;
}
Add an addition
32,361
/**
 * Diff two catalog maps: every child of prevMap missing from newMap is a
 * deletion, every shared child is recursively diffed, and every child only
 * in newMap is an addition.
 *
 * @param mapName name of the map being diffed
 * @param prevMap children in the previous catalog
 * @param newMap  children in the new catalog
 */
private void getCommandsToDiff ( String mapName , CatalogMap < ? extends CatalogType > prevMap , CatalogMap < ? extends CatalogType > newMap ) {
    assert ( prevMap != null ) ;
    assert ( newMap != null ) ;
    // Pass 1: deletions and recursive diffs of surviving children.
    for ( CatalogType prevType : prevMap ) {
        CatalogType newType = newMap . get ( prevType . getTypeName ( ) ) ;
        if ( newType == null ) {
            writeDeletion ( prevType , newMap . m_parent , mapName ) ;
        } else {
            diffRecursively ( prevType , newType ) ;
        }
    }
    // Pass 2: additions -- children present only in the new map.
    for ( CatalogType newType : newMap ) {
        if ( prevMap . get ( newType . getTypeName ( ) ) == null ) {
            writeAddition ( newType ) ;
        }
    }
}
Check if all the children in prevMap are present and identical in newMap . Then check if anything is in newMap that isn t in prevMap .
32,362
/**
 * For use with CHECK constraints.  Under development.
 *
 * Returns the SQL text for VALUE, ROW, and TABLE expressions.  For row and
 * table subqueries the method returns an empty string; any other opType is
 * an internal error.
 *
 * @return the SQL representation of this expression
 */
public String getSQL ( ) {
    StringBuffer sb = new StringBuffer ( 64 ) ;
    switch ( opType ) {
        case OpTypes . VALUE :
            if ( valueData == null ) {
                return Tokens . T_NULL ;
            }
            return dataType . convertToSQLString ( valueData ) ;
        case OpTypes . ROW :
            // (expr, expr, ...)
            sb . append ( '(' ) ;
            for ( int i = 0 ; i < nodes . length ; i ++ ) {
                sb . append ( nodes [ i ] . getSQL ( ) ) ;
                if ( i < nodes . length - 1 ) {
                    sb . append ( ',' ) ;
                }
            }
            sb . append ( ')' ) ;
            return sb . toString ( ) ;
        case OpTypes . TABLE :
            // comma-separated rows, without enclosing parentheses
            for ( int i = 0 ; i < nodes . length ; i ++ ) {
                sb . append ( nodes [ i ] . getSQL ( ) ) ;
                if ( i < nodes . length - 1 ) {
                    sb . append ( ',' ) ;
                }
            }
            return sb . toString ( ) ;
    }
    // Fall-through: only subquery types are tolerated here (and produce an
    // empty string); anything else is an internal inconsistency.
    switch ( opType ) {
        case OpTypes . ROW_SUBQUERY :
        case OpTypes . TABLE_SUBQUERY :
            break ;
        default :
            throw Error . runtimeError ( ErrorCode . U_S0500 , "Expression" ) ;
    }
    return sb . toString ( ) ;
}
For use with CHECK constraints . Under development .
32,363
/**
 * Set the data type of this expression.  For a literal VALUE expression
 * the stored value is first converted from the old type to the new one so
 * the payload stays consistent with the declared type.
 */
void setDataType ( Session session , Type type ) {
    boolean isLiteral = ( opType == OpTypes . VALUE ) ;
    if ( isLiteral ) {
        valueData = type . convertToType ( session , valueData , dataType ) ;
    }
    dataType = type ;
}
Set the data type
32,364
/**
 * Replace aliases used in an ORDER BY clause with the expressions they
 * refer to, recursing through all non-null child nodes.
 *
 * @param columns the select-list expressions the aliases may refer to
 * @param length  number of usable entries in columns
 * @return this expression, with children rewritten in place
 */
Expression replaceAliasInOrderBy ( Expression [ ] columns , int length ) {
    for ( int idx = 0 ; idx < nodes . length ; idx ++ ) {
        Expression child = nodes [ idx ] ;
        if ( child != null ) {
            nodes [ idx ] = child . replaceAliasInOrderBy ( columns , length ) ;
        }
    }
    return this ;
}
return the expression for an alias used in an ORDER BY clause
32,365
/**
 * Resolve tables and collect unresolved column expressions, considering
 * all of the given range variables.
 *
 * @param rangeVarArray the range variables to resolve against
 * @param unresolvedSet accumulator for unresolved references; may be null
 * @return the (possibly newly created) list of unresolved references
 */
public HsqlList resolveColumnReferences ( RangeVariable [ ] rangeVarArray , HsqlList unresolvedSet ) {
    // Delegates to the four-argument overload using every range variable;
    // the trailing true presumably permits outer references -- confirm
    // against that overload's signature.
    return resolveColumnReferences ( rangeVarArray , rangeVarArray . length , unresolvedSet , true ) ;
}
resolve tables and collect unresolved column expressions
32,366
/**
 * Materialize the rows of this VALUES-style expression into the given
 * subquery table store, converting each column to the table's declared
 * column type first.
 *
 * @param session current session, used for type conversion and row creation
 * @param store   the persistent store backing the subquery table
 */
void insertValuesIntoSubqueryTable ( Session session , PersistentStore store ) {
    for ( int i = 0 ; i < nodes . length ; i ++ ) {
        Object [ ] data = nodes [ i ] . getRowValue ( session ) ;
        for ( int j = 0 ; j < nodeDataTypes . length ; j ++ ) {
            data [ j ] = nodeDataTypes [ j ] . convertToType ( session , data [ j ] , nodes [ i ] . nodes [ j ] . dataType ) ;
        }
        Row row = ( Row ) store . getNewCachedObject ( session , data ) ;
        try {
            store . indexRow ( session , row ) ;
        } catch ( HsqlException e ) {
            // NOTE(review): exception deliberately swallowed -- presumably
            // duplicate rows are simply dropped (IN-list semantics); confirm
            // before tightening this handler.
        }
    }
}
Details of IN condition optimisation for 1 . 9 . 0 Predicates with SELECT are QUERY expressions
32,367
/**
 * Returns a Select object that can be used for checking the contents of an
 * existing table against the given CHECK search condition.
 *
 * The generated query selects TRUE from the table where NOT (condition),
 * so any returned row is a violation of the constraint.
 *
 * @param session current session for name resolution and typing
 * @param t       the table whose rows are checked
 * @param e       the CHECK search condition (must be BOOLEAN-typed)
 * @return a resolved query specification for the violation check
 */
static QuerySpecification getCheckSelect ( Session session , Table t , Expression e ) {
    CompileContext compileContext = new CompileContext ( session ) ;
    QuerySpecification s = new QuerySpecification ( compileContext ) ;
    s . exprColumns = new Expression [ 1 ] ;
    s . exprColumns [ 0 ] = EXPR_TRUE ;
    RangeVariable range = new RangeVariable ( t , null , null , null , compileContext ) ;
    s . rangeVariables = new RangeVariable [ ] { range } ;
    // All columns in the condition must resolve against the checked table.
    HsqlList unresolved = e . resolveColumnReferences ( s . rangeVariables , null ) ;
    ExpressionColumn . checkColumnsResolved ( unresolved ) ;
    e . resolveTypes ( session , null ) ;
    // CHECK conditions must be BOOLEAN.
    if ( Type . SQL_BOOLEAN != e . getDataType ( ) ) {
        throw Error . error ( ErrorCode . X_42568 ) ;
    }
    // Invert the condition: matching rows are constraint violations.
    Expression condition = new ExpressionLogical ( OpTypes . NOT , e ) ;
    s . queryCondition = condition ;
    s . resolveReferences ( session ) ;
    s . resolveTypes ( session ) ;
    return s ;
}
Returns a Select object that can be used for checking the contents of an existing table against the given CHECK search condition .
32,368
/**
 * Collect all expressions of a set of expression types appearing anywhere
 * in an expression tree, including subquery bodies.  Traversal stops at
 * (and excludes) nodes whose type is in stopAtTypeSet.
 *
 * @param set           accumulator for the matching expressions
 * @param e             root of the (sub)tree to scan; may be null
 * @param typeSet       opTypes to collect
 * @param stopAtTypeSet opTypes at which traversal is cut off
 */
static void collectAllExpressions ( HsqlList set , Expression e , OrderedIntHashSet typeSet , OrderedIntHashSet stopAtTypeSet ) {
    if ( e == null || stopAtTypeSet . contains ( e . opType ) ) {
        return ;
    }
    // Depth-first: visit children before considering this node.
    for ( Expression child : e . nodes ) {
        collectAllExpressions ( set , child , typeSet , stopAtTypeSet ) ;
    }
    if ( typeSet . contains ( e . opType ) ) {
        set . add ( e ) ;
    }
    // Descend into an attached subquery's expression tree as well.
    if ( e . subQuery != null && e . subQuery . queryExpression != null ) {
        e . subQuery . queryExpression . collectAllExpressions ( set , typeSet , stopAtTypeSet ) ;
    }
}
collect all expressions of a set of expression types appearing anywhere in a select statement and its subselects etc .
32,369
/**
 * Return a unique identifier string for this expression, computed lazily
 * and cached.  Despite older documentation mentioning a memory address,
 * the id is the session-assigned node id rendered as a decimal string.
 *
 * @param session session that assigns and tracks node ids
 * @return the cached identifier string
 */
protected String getUniqueId ( final Session session ) {
    if ( cached_id != null ) {
        return cached_id ;
    }
    // NOTE(review): setting cached_id non-null before traverse() may be a
    // deliberate guard against re-entrant calls during traversal -- confirm
    // before simplifying this seemingly redundant assignment.
    cached_id = new String ( ) ;
    if ( getType ( ) != OpTypes . VALUE && getType ( ) != OpTypes . COLUMN ) {
        traverse ( this , session ) ;
    }
    long nodeId = session . getNodeIdForExpression ( this ) ;
    cached_id = Long . toString ( nodeId ) ;
    return cached_id ;
}
Get a unique identifier string for this Expression , derived from a session - assigned node id , computed lazily and cached .
32,370
/**
 * Convert a USING column reference element (holding one columnref per
 * joined table, e.g. columnref T1.C) into an equivalent nested COALESCE
 * expression over the distinct per-table column references.
 *
 * Each step builds: coalesce(isnull(ref) ? <rest> : ref), chaining the
 * remaining references through the ALTERNATIVE child of the previous step.
 *
 * @param session  used to mint unique ids for generated elements
 * @param exp      the columnref-group element; mutated and duplicated here
 * @param dataType result type of the coalesce expression (non-null)
 * @return the root of the generated coalesce expression tree
 * @throws org.hsqldb_voltpatches.HSQLInterface.HSQLParseException if the
 *         required IS_NULL or ALTERNATIVE prototypes are unsupported
 */
private VoltXMLElement convertUsingColumnrefToCoaleseExpression ( Session session , VoltXMLElement exp , Type dataType ) throws org . hsqldb_voltpatches . HSQLInterface . HSQLParseException {
    assert ( dataType != null ) ;
    exp . attributes . put ( "valuetype" , dataType . getNameString ( ) ) ;
    // Deduplicate the column references by table (or table alias).
    HashSet < String > tables = new HashSet < > ( ) ;
    ArrayDeque < VoltXMLElement > uniqueColumnrefs = new ArrayDeque < > ( ) ;
    for ( VoltXMLElement columnref : exp . children ) {
        String table = columnref . attributes . get ( "table" ) ;
        String tableAlias = columnref . attributes . get ( "tablealias" ) ;
        assert ( table != null ) ;
        String tableOrAlias = ( tableAlias == null ) ? table : tableAlias ;
        if ( tables . contains ( tableOrAlias ) ) {
            continue ;
        }
        tables . add ( tableOrAlias ) ;
        uniqueColumnrefs . add ( columnref ) ;
    }
    exp . children . clear ( ) ;
    // A USING join always references at least two tables.
    assert ( uniqueColumnrefs . size ( ) > 1 ) ;
    VoltXMLElement lastAlternativeExpr = null ;
    VoltXMLElement resultColaesceExpr = null ;
    while ( true ) {
        VoltXMLElement next = uniqueColumnrefs . pop ( ) ;
        if ( uniqueColumnrefs . isEmpty ( ) ) {
            // Last reference: it becomes the innermost alternative value.
            assert ( lastAlternativeExpr != null ) ;
            lastAlternativeExpr . children . add ( 0 , next ) ;
            break ;
        }
        // isnull(next) -- the coalesce test for this level.
        VoltXMLElement isnull_expr = prototypes . get ( OpTypes . IS_NULL ) ;
        if ( isnull_expr == null ) {
            throwForUnsupportedExpression ( OpTypes . IS_NULL ) ;
        }
        isnull_expr = isnull_expr . duplicate ( ) ;
        isnull_expr . attributes . put ( "id" , this . getUniqueId ( session ) ) ;
        isnull_expr . children . add ( next ) ;
        // alternative(next, <filled in by the next iteration>)
        VoltXMLElement alt_expr = prototypes . get ( OpTypes . ALTERNATIVE ) ;
        if ( alt_expr == null ) {
            throwForUnsupportedExpression ( OpTypes . ALTERNATIVE ) ;
        }
        alt_expr = alt_expr . duplicate ( ) ;
        alt_expr . attributes . put ( "id" , this . getUniqueId ( session ) ) ;
        alt_expr . attributes . put ( "valuetype" , dataType . getNameString ( ) ) ;
        alt_expr . children . add ( next ) ;
        // The coalesce node for this level wraps the test and alternative.
        VoltXMLElement coalesceExpr = exp . duplicate ( ) ;
        coalesceExpr . attributes . put ( "alias" , next . attributes . get ( "alias" ) ) ;
        coalesceExpr . attributes . put ( "column" , next . attributes . get ( "column" ) ) ;
        coalesceExpr . children . add ( isnull_expr ) ;
        coalesceExpr . children . add ( alt_expr ) ;
        if ( resultColaesceExpr == null ) {
            resultColaesceExpr = coalesceExpr ;
        } else {
            // Nest this level inside the previous level's alternative slot.
            assert ( lastAlternativeExpr != null ) ;
            lastAlternativeExpr . children . add ( 0 , coalesceExpr ) ;
        }
        lastAlternativeExpr = alt_expr ;
    }
    assert ( resultColaesceExpr != null ) ;
    return resultColaesceExpr ;
}
Convert a USING column reference element ( e . g . columnref T1 . C ) into an equivalent nested COALESCE expression over the distinct per - table column references .
32,371
/**
 * Appends the usage clause for an OptionGroup to a StringBuffer.  The
 * clause is wrapped in square brackets when the group is not required.
 * The display of each option is handled by appendOption.
 *
 * @param buff  the StringBuffer to append to
 * @param group the group to append
 */
private void appendOptionGroup ( StringBuffer buff , OptionGroup group ) {
    boolean optional = ! group . isRequired ( ) ;
    if ( optional ) {
        buff . append ( "[" ) ;
    }
    List < Option > sortedOptions = new ArrayList < Option > ( group . getOptions ( ) ) ;
    if ( getOptionComparator ( ) != null ) {
        Collections . sort ( sortedOptions , getOptionComparator ( ) ) ;
    }
    // Options in a group are alternatives, separated by " | ".
    for ( Iterator < Option > it = sortedOptions . iterator ( ) ; it . hasNext ( ) ; ) {
        appendOption ( buff , it . next ( ) , true ) ;
        if ( it . hasNext ( ) ) {
            buff . append ( " | " ) ;
        }
    }
    if ( optional ) {
        buff . append ( "]" ) ;
    }
}
Appends the usage clause for an OptionGroup to a StringBuffer . The clause is wrapped in square brackets if the group is required . The display of the options is handled by appendOption
32,372
/**
 * Appends the usage clause for an Option to a StringBuffer, wrapping it in
 * square brackets when not required.
 *
 * @param buff     the StringBuffer to append to
 * @param option   the option to append
 * @param required whether the option is required in this context
 */
private void appendOption ( StringBuffer buff , Option option , boolean required ) {
    if ( ! required ) {
        buff . append ( "[" ) ;
    }
    if ( option . getOpt ( ) != null ) {
        buff . append ( "-" ) . append ( option . getOpt ( ) ) ;
    } else {
        buff . append ( "--" ) . append ( option . getLongOpt ( ) ) ;
    }
    // Show the argument placeholder unless the arg name is explicitly empty.
    String argName = option . getArgName ( ) ;
    boolean suppressArg = ( argName != null && argName . length ( ) == 0 ) ;
    if ( option . hasArg ( ) && ! suppressArg ) {
        buff . append ( option . getOpt ( ) == null ? longOptSeparator : " " ) ;
        buff . append ( "<" ) . append ( argName != null ? argName : getArgName ( ) ) . append ( ">" ) ;
    }
    if ( ! required ) {
        buff . append ( "]" ) ;
    }
}
Appends the usage clause for an Option to a StringBuffer .
32,373
/**
 * Print the cmdLineSyntax to the specified writer, using the specified
 * width and prefixing it with the configured syntax prefix.  Continuation
 * lines are indented to the start of the application name's arguments.
 *
 * @param pw            writer to print to
 * @param width         maximum line width
 * @param cmdLineSyntax the usage statement to print
 */
public void printUsage ( PrintWriter pw , int width , String cmdLineSyntax ) {
    int argPos = cmdLineSyntax . indexOf ( ' ' ) + 1 ;
    int continuationIndent = getSyntaxPrefix ( ) . length ( ) + argPos ;
    String fullSyntax = getSyntaxPrefix ( ) + cmdLineSyntax ;
    printWrapped ( pw , width , continuationIndent , fullSyntax ) ;
}
Print the cmdLineSyntax to the specified writer using the specified width .
32,374
/**
 * Render the specified Options and return the rendered Options in a
 * StringBuffer.
 *
 * Two passes: first build the "-o,--opt <arg>" prefix for every option and
 * track the widest; then emit each prefix padded to that width followed by
 * the wrapped description.
 *
 * @param sb      buffer to append the rendered options to
 * @param width   maximum line width
 * @param options the options to render
 * @param leftPad spaces before each option prefix
 * @param descPad spaces between the prefix column and the description
 * @return the same buffer, for chaining
 */
protected StringBuffer renderOptions ( StringBuffer sb , int width , Options options , int leftPad , int descPad ) {
    final String lpad = createPadding ( leftPad ) ;
    final String dpad = createPadding ( descPad ) ;
    // max holds the widest prefix; prefixes are later padded out to it.
    int max = 0 ;
    List < StringBuffer > prefixList = new ArrayList < StringBuffer > ( ) ;
    List < Option > optList = options . helpOptions ( ) ;
    if ( getOptionComparator ( ) != null ) {
        Collections . sort ( optList , getOptionComparator ( ) ) ;
    }
    // Pass 1: build each option's prefix and record the maximum width.
    for ( Option option : optList ) {
        StringBuffer optBuf = new StringBuffer ( ) ;
        if ( option . getOpt ( ) == null ) {
            // Long-only options are indented to line up with "-x,".
            optBuf . append ( lpad ) . append ( "   " ) . append ( getLongOptPrefix ( ) ) . append ( option . getLongOpt ( ) ) ;
        } else {
            optBuf . append ( lpad ) . append ( getOptPrefix ( ) ) . append ( option . getOpt ( ) ) ;
            if ( option . hasLongOpt ( ) ) {
                optBuf . append ( ',' ) . append ( getLongOptPrefix ( ) ) . append ( option . getLongOpt ( ) ) ;
            }
        }
        if ( option . hasArg ( ) ) {
            String argName = option . getArgName ( ) ;
            if ( argName != null && argName . length ( ) == 0 ) {
                // Explicitly empty arg name: just reserve a space.
                optBuf . append ( ' ' ) ;
            } else {
                optBuf . append ( option . hasLongOpt ( ) ? longOptSeparator : " " ) ;
                optBuf . append ( "<" ) . append ( argName != null ? option . getArgName ( ) : getArgName ( ) ) . append ( ">" ) ;
            }
        }
        prefixList . add ( optBuf ) ;
        max = optBuf . length ( ) > max ? optBuf . length ( ) : max ;
    }
    // Pass 2: emit padded prefix + wrapped description for each option.
    int x = 0 ;
    for ( Iterator < Option > it = optList . iterator ( ) ; it . hasNext ( ) ; ) {
        Option option = it . next ( ) ;
        StringBuilder optBuf = new StringBuilder ( prefixList . get ( x ++ ) . toString ( ) ) ;
        if ( optBuf . length ( ) < max ) {
            optBuf . append ( createPadding ( max - optBuf . length ( ) ) ) ;
        }
        optBuf . append ( dpad ) ;
        int nextLineTabStop = max + descPad ;
        if ( option . getDescription ( ) != null ) {
            optBuf . append ( option . getDescription ( ) ) ;
        }
        renderWrappedText ( sb , width , nextLineTabStop , optBuf . toString ( ) ) ;
        if ( it . hasNext ( ) ) {
            sb . append ( getNewLine ( ) ) ;
        }
    }
    return sb ;
}
Render the specified Options and return the rendered Options in a StringBuffer .
32,375
/**
 * Render the specified text, wrapped at the given width, and return the
 * result in the StringBuffer.  Continuation lines are indented by
 * nextLineTabStop spaces.
 *
 * @param sb              buffer to append the wrapped text to
 * @param width           maximum line width
 * @param nextLineTabStop indent for continuation lines
 * @param text            the text to wrap
 * @return the same buffer, for chaining
 */
protected StringBuffer renderWrappedText ( StringBuffer sb , int width , int nextLineTabStop , String text ) {
    int pos = findWrapPos ( text , width , 0 ) ;
    if ( pos == - 1 ) {
        // Everything fits on one line.
        sb . append ( rtrim ( text ) ) ;
        return sb ;
    }
    sb . append ( rtrim ( text . substring ( 0 , pos ) ) ) . append ( getNewLine ( ) ) ;
    // An indent as wide as the line would never terminate; fall back to 1.
    if ( nextLineTabStop >= width ) {
        nextLineTabStop = 1 ;
    }
    final String padding = createPadding ( nextLineTabStop ) ;
    while ( true ) {
        text = padding + text . substring ( pos ) . trim ( ) ;
        pos = findWrapPos ( text , width , 0 ) ;
        if ( pos == - 1 ) {
            sb . append ( text ) ;
            return sb ;
        }
        // Guard against an unbreakable word directly after the padding:
        // force a hard break at the width instead of looping forever.
        if ( text . length ( ) > width && pos == nextLineTabStop - 1 ) {
            pos = width ;
        }
        sb . append ( rtrim ( text . substring ( 0 , pos ) ) ) . append ( getNewLine ( ) ) ;
    }
}
Render the specified text and return the rendered Options in a StringBuffer .
32,376
/**
 * Return true iff the existing function descriptor matches the given
 * return type and parameter types exactly (identity comparison).  These
 * are all HSQLDB types, not Volt types.
 *
 * @param existingFd     the descriptor to compare against
 * @param returnType     candidate return type
 * @param parameterTypes candidate parameter types, in order
 * @return true on an exact signature match
 */
private static boolean functionMatches ( FunctionDescriptor existingFd , Type returnType , Type [ ] parameterTypes ) {
    if ( existingFd . m_type != returnType ) {
        return false ;
    }
    if ( existingFd . m_paramTypes . length != parameterTypes . length ) {
        return false ;
    }
    // Types are compared by identity, matching the original semantics.
    for ( int i = 0 ; i < parameterTypes . length ; i ++ ) {
        if ( existingFd . m_paramTypes [ i ] != parameterTypes [ i ] ) {
            return false ;
        }
    }
    return true ;
}
Return true iff the existing function descriptor matches the given return type and parameter types . These are all HSQLDB types not Volt types .
32,377
/**
 * Given a function name and signature, find an existing or saved
 * definition matching both and return it, or null if none matches.
 *
 * NOTE(review): if a descriptor is found under the lower-case name but its
 * signature does not match, the saved-functions map is never consulted --
 * confirm this asymmetry is intentional.
 *
 * @param functionName  lower-case function name
 * @param returnType    required return type
 * @param parameterType required parameter types
 * @return the matching descriptor, or null
 */
private static FunctionDescriptor findFunction ( String functionName , Type returnType , Type [ ] parameterType ) {
    m_logger . debug ( "Looking for UDF " + functionName ) ;
    FunctionDescriptor fd = FunctionDescriptor . m_by_LC_name . get ( functionName ) ;
    if ( fd == null ) {
        m_logger . debug ( "    Not defined in by_LC_name.  Maybe it's saved." ) ;
        fd = FunctionDescriptor . m_saved_functions . get ( functionName ) ;
    }
    if ( fd != null && functionMatches ( fd , returnType , parameterType ) ) {
        m_logger . debug ( "  " + functionName + " is defined or saved.  id == " + fd . getId ( ) ) ;
        return fd ;
    }
    m_logger . debug ( "  " + functionName + " is not defined or saved." ) ;
    return null ;
}
Given a function name and signature find if there is an existing definition or saved defintion which matches the name and signature and return the definition .
32,378
/**
 * This function registers a UDF using VoltType values for the return type
 * and parameter types, and returns the function id assigned to it.
 *
 * If a matching definition already exists (or was saved), its id is
 * reused; otherwise the caller-supplied id is used when positive, or a
 * fresh id is allocated.  The id sequence counter is advanced past any id
 * handed out here.
 *
 * @param functionName       lower-case UDF name
 * @param functionId         preassigned id, or a non-positive value to allocate one
 * @param voltReturnType     Volt return type
 * @param voltParameterTypes Volt parameter types, in order
 * @return the id under which the UDF is registered
 */
public static synchronized int registerTokenForUDF ( String functionName , int functionId , VoltType voltReturnType , VoltType [ ] voltParameterTypes ) {
    int retFunctionId ;
    Type hsqlReturnType = hsqlTypeFromVoltType ( voltReturnType ) ;
    Type [ ] hsqlParameterTypes = hsqlTypeFromVoltType ( voltParameterTypes ) ;
    FunctionDescriptor oldFd = findFunction ( functionName , hsqlReturnType , hsqlParameterTypes ) ;
    if ( oldFd != null ) {
        // Re-register the existing descriptor and keep its id.
        FunctionDescriptor . addDefinedFunction ( functionName , oldFd ) ;
        retFunctionId = oldFd . getId ( ) ;
        // A positive caller-supplied id must agree with the existing one.
        assert ( ( functionId < 0 ) || ( functionId == retFunctionId ) ) ;
    } else {
        if ( functionId > 0 ) {
            retFunctionId = functionId ;
        } else {
            retFunctionId = getNextFunctionId ( ) ;
        }
        FunctionDescriptor fd = makeFunctionDescriptorFromParts ( functionName , retFunctionId , hsqlReturnType , hsqlParameterTypes ) ;
        if ( isUserDefinedFunctionId ( retFunctionId ) ) {
            FunctionDescriptor . addDefinedFunction ( functionName , fd ) ;
        }
        m_logger . debug ( String . format ( "Added UDF \"%s\"(%d) with %d parameters" , functionName , retFunctionId , voltParameterTypes . length ) ) ;
    }
    // Keep the sequence counter ahead of every id we have handed out.
    if ( m_udfSeqId <= retFunctionId ) {
        m_udfSeqId = retFunctionId + 1 ;
    }
    return retFunctionId ;
}
This function registers a UDF using VoltType values for the return type and parameter types .
32,379
/**
 * Convert a VoltType to an HSQL type, going via the Java class the Volt
 * type maps to and its SQL parameter type number.
 *
 * @param voltReturnType the Volt type to convert
 * @return the corresponding default-sized HSQL type
 */
public static Type hsqlTypeFromVoltType ( VoltType voltReturnType ) {
    Class < ? > javaClass = VoltType . classFromByteValue ( voltReturnType . getValue ( ) ) ;
    return Type . getDefaultTypeWithSize ( Types . getParameterSQLTypeNumber ( javaClass ) ) ;
}
Convert a VoltType to an HSQL type .
32,380
/**
 * Map the single-parameter hsqlTypeFromVoltType over an array, preserving
 * order.
 *
 * @param voltParameterTypes the Volt types to convert
 * @return a new array of corresponding HSQL types
 */
public static Type [ ] hsqlTypeFromVoltType ( VoltType [ ] voltParameterTypes ) {
    int count = voltParameterTypes . length ;
    Type [ ] converted = new Type [ count ] ;
    for ( int i = 0 ; i < count ; i ++ ) {
        converted [ i ] = hsqlTypeFromVoltType ( voltParameterTypes [ i ] ) ;
    }
    return converted ;
}
Map the single parameter hsqlTypeFromVoltType over an array .
32,381
/**
 * Used when data is read from disk into the cache for the first time:
 * allocate a fresh linked chain of index nodes, one per index on the
 * table, headed by the primary node.
 */
void setNewNodes ( ) {
    int indexCount = tTable . getIndexCount ( ) ;
    nPrimaryNode = new NodeAVLMemoryPointer ( this ) ;
    NodeAVL chainTail = nPrimaryNode ;
    // One additional node for each secondary index.
    for ( int i = 1 ; i < indexCount ; i ++ ) {
        chainTail . nNext = new NodeAVLMemoryPointer ( this ) ;
        chainTail = chainTail . nNext ;
    }
}
Used when data is read from the disk into the Cache the first time . New Nodes are created which are then indexed .
32,382
/**
 * Ensure the current tail buffer has room for a message of the given size.
 * If the tail is non-empty and too full, it is compiled and queued
 * asynchronously, and a fresh tail is created.  The async task also crashes
 * the node if the on-disk task log has exceeded its size limit.
 *
 * @param messageSize size in bytes of the message about to be appended
 * @throws IOException declared for callers; queuing itself is asynchronous
 */
private void bufferCatchup ( int messageSize ) throws IOException {
    // Roll the tail only when it has content and cannot fit the message.
    if ( m_tail != null && m_tail . size ( ) > 0 && messageSize > m_bufferHeadroom ) {
        m_tail . compile ( ) ;
        final RejoinTaskBuffer boundTail = m_tail ;
        final Runnable r = new Runnable ( ) {
            public void run ( ) {
                try {
                    m_buffers . offer ( boundTail . getContainer ( ) ) ;
                    // m_overflowLimit is in megabytes.
                    if ( m_reader . sizeInBytes ( ) > m_overflowLimit * 1024 * 1024 ) {
                        VoltDB . crashLocalVoltDB ( "On-disk task log is full. Please reduce " + "workload and try live rejoin again, or use blocking rejoin." ) ;
                    }
                } catch ( Throwable t ) {
                    VoltDB . crashLocalVoltDB ( "Error in task log buffering transactions" , true , t ) ;
                }
            }
        } ;
        m_es . execute ( r ) ;
        m_tail = null ;
        m_tasksPendingInCurrentTail = 0 ;
    }
    // Start a new tail sized for the incoming message.
    if ( m_tail == null ) {
        m_tail = new RejoinTaskBuffer ( m_partitionId , messageSize ) ;
        m_bufferHeadroom = RejoinTaskBuffer . DEFAULT_BUFFER_SIZE ;
    }
}
The buffers are bound by the number of tasks in them . Once the current buffer has enough tasks it will be queued and a new buffer will be created .
32,383
// Tries to dequeue the next replayable task message. Asynchronously pre-fetches
// up to three buffers from the on-disk deque, falls back to promoting the
// in-memory tail buffer when the disk side is drained, and suppresses any
// message at or before m_snapshotSpHandle (already covered by the snapshot).
public TransactionInfoBaseMessage getNextMessage() throws IOException {
    if (m_closed) {
        throw new IOException("Closed");
    }
    if (m_head == null) {
        final Runnable r = new Runnable() {
            public void run() {
                try {
                    BBContainer cont = m_reader.poll(PersistentBinaryDeque.UNSAFE_CONTAINER_FACTORY);
                    if (cont != null) {
                        m_headBuffers.offer(new RejoinTaskBuffer(cont));
                    }
                } catch (Throwable t) {
                    VoltDB.crashLocalVoltDB("Error retrieving buffer data in task log", true, t);
                } finally {
                    m_pendingPolls.decrementAndGet();
                }
            }
        };
        // Keep up to 3 buffers in flight or ready (pending polls + queued).
        for (int ii = m_pendingPolls.get() + m_headBuffers.size(); ii < 3; ii++) {
            m_pendingPolls.incrementAndGet();
            m_es.execute(r);
        }
        m_head = m_headBuffers.poll();
    }
    TransactionInfoBaseMessage nextTask = null;
    if (m_head != null) {
        nextTask = m_head.nextTask();
        if (nextTask == null) {
            // Head buffer exhausted; discard it and retry on the next call.
            scheduleDiscard(m_head);
            m_head = null;
        } else {
            m_taskCount--;
        }
    } else if ((m_taskCount - m_tasksPendingInCurrentTail == 0) && m_tail != null) {
        // Disk side drained and all remaining tasks live in the tail:
        // promote the tail to head and recurse to pull from it.
        m_tasksPendingInCurrentTail = 0;
        m_tail.compile();
        // NOTE(review): m_head is necessarily null in this branch, so this
        // discard appears unreachable — confirm before removing.
        if (m_head != null) {
            scheduleDiscard(m_head);
        }
        m_head = m_tail;
        m_tail = null;
        nextTask = getNextMessage();
    }
    // Drop anything the snapshot already covers.
    if (nextTask != null && nextTask.getSpHandle() > m_snapshotSpHandle) {
        return nextTask;
    } else {
        return null;
    }
}
Try to get the next task message from the queue .
32,384
/**
 * Sends the buffer synchronously, bypassing the selector, and leaves the
 * socket in blocking mode (the caller closes it afterwards).
 *
 * Fix: the original called {@code sock.configureBlocking(true)} before the
 * {@code sock != null} check, so a torn-down connection triggered an
 * uncaught NullPointerException instead of being skipped quietly.
 *
 * @param bb buffer to send; the {@code closeConn} sentinel sends nothing
 */
void sendBufferSync(ByteBuffer bb) {
    try {
        if (sock == null) {
            return;
        }
        sock.configureBlocking(true);
        // closeConn is a sentinel meaning "just close"; write nothing for it.
        if (bb != closeConn) {
            sock.write(bb);
            packetSent();
        }
    } catch (IOException ie) {
        LOG.error("Error sending data synchronously ", ie);
    }
}
send buffer without using the asynchronous calls to selector and then close the socket
32,385
/**
 * Flushes and closes the writer tied to a four-letter-word command, then
 * closes the underlying connection regardless of whether the flush worked.
 *
 * @param pwriter writer to drain; may be null
 */
private void cleanupWriterSocket(PrintWriter pwriter) {
    try {
        if (pwriter == null) {
            return;
        }
        pwriter.flush();
        pwriter.close();
    } catch (Exception e) {
        LOG.info("Error closing PrintWriter ", e);
    } finally {
        // Always close the connection, even if draining the writer failed.
        try {
            close();
        } catch (Exception e) {
            LOG.error("Error closing a command socket ", e);
        }
    }
}
clean up the socket related to a command and also make sure we flush the data before we do that
32,386
/**
 * Interprets the first four bytes of lenBuffer as either a payload length or
 * a four-letter admin command word.
 *
 * @return false when a four-letter word was handled; true when a payload
 *         buffer of the requested length was allocated
 * @throws IOException on an out-of-range length or when the server is down
 */
private boolean readLength(SelectionKey k) throws IOException {
    final int frameLen = lenBuffer.getInt();
    // Before the session is initialized, the value may be a command word.
    if (!initialized && checkFourLetterWord(k, frameLen)) {
        return false;
    }
    if (frameLen < 0 || frameLen > BinaryInputArchive.maxBuffer) {
        throw new IOException("Len error " + frameLen);
    }
    if (zk == null) {
        throw new IOException("ZooKeeperServer not running");
    }
    incomingBuffer = ByteBuffer.allocate(frameLen);
    return true;
}
Reads the first 4 bytes of lenBuffer, which hold either the true payload length or a four-letter command word.
32,387
// Best-effort teardown of this connection's socket. Each step is attempted
// independently — output shutdown, input shutdown, socket close, channel
// close — and any IOException along the way is logged at debug and ignored,
// so a failure in one step never prevents the later ones.
private void closeSock() {
    if (sock == null) {
        return;
    }
    LOG.debug("Closed socket connection for client " + sock.socket().getRemoteSocketAddress() + (sessionId != 0 ? " which had sessionid 0x" + Long.toHexString(sessionId) : " (no session established for client)"));
    try {
        // Flush pending output to the peer before tearing down.
        sock.socket().shutdownOutput();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during output shutdown", e);
        }
    }
    try {
        sock.socket().shutdownInput();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during input shutdown", e);
        }
    }
    try {
        sock.socket().close();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during socket close", e);
        }
    }
    try {
        // Close the channel itself last.
        sock.close();
    } catch (IOException e) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("ignoring exception during socketchannel close", e);
        }
    }
    sock = null;
}
Close resources associated with the sock of this cnxn .
32,388
/**
 * Fires one asynchronous Increment invocation at a randomly chosen tuple,
 * adding a random amount in [0, 5). On connection trouble, backs off briefly
 * instead of failing.
 */
void increment() {
    final long id = rand.nextInt(config.tuples);
    final long toIncrement = rand.nextInt(5);
    try {
        client.callProcedure(new CMCallback(), "Increment", toIncrement, id);
    } catch (IOException e) {
        // Backpressure or connection loss: pause briefly and move on.
        try {
            Thread.sleep(50);
        } catch (Exception ignored) {
        }
    }
}
Run the Increment procedure on the server asynchronously .
32,389
/**
 * Records a log entry for the specified SQL statement on behalf of the
 * specified session, provided statement logging is enabled and a log exists.
 */
public synchronized void writeToLog(Session session, String statement) {
    if (!logStatements || log == null) {
        return;
    }
    log.writeStatement(session, statement);
}
Records a Log entry for the specified SQL statement on behalf of the specified Session object .
32,390
// Thin delegation: opens the TextCache for a TEXT table's source file via the
// transaction log. Flags select read-only data access and reversed scan order.
public DataFileCache openTextCache(Table table, String source, boolean readOnlyData, boolean reversed) {
    return log.openTextCache(table, source, readOnlyData, reversed);
}
Opens the TextCache object .
32,391
// Reads the database properties that size and configure this cache and derives
// the internal limits from them. Called once during setup.
protected void initParams(Database database, String baseFileName) {
    HsqlDatabaseProperties props = database.getProperties();
    fileName = baseFileName + ".data";
    backupFileName = baseFileName + ".backup";
    this.database = database;
    fa = database.getFileAccess();
    // Property reads use (key, default, min, max): lookup table = 2^scale.
    int cacheScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_scale, 14, 8, 18);
    // Average row size estimate = 2^sizeScale bytes.
    int cacheSizeScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_size_scale, 10, 6, 20);
    // Free-block list capacity = 2^freeCountScale entries.
    int cacheFreeCountScale = props.getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_free_count_scale, 9, 6, 12);
    incBackup = database.getProperties().isPropertyTrue(HsqlDatabaseProperties.hsqldb_inc_backup);
    cacheFileScale = database.getProperties().getIntegerProperty(HsqlDatabaseProperties.hsqldb_cache_file_scale, 8);
    // Only scales 1 and 8 are honored; anything else is coerced to 8.
    if (cacheFileScale != 1) {
        cacheFileScale = 8;
    }
    // Rows are padded to the file-scale granularity (minimum 8 bytes).
    cachedRowPadding = 8;
    if (cacheFileScale > 8) {
        cachedRowPadding = cacheFileScale;
    }
    cacheReadonly = database.isFilesReadOnly();
    int lookupTableLength = 1 << cacheScale;
    int avgRowBytes = 1 << cacheSizeScale;
    // Allow up to 3 entries per lookup slot on average.
    maxCacheSize = lookupTableLength * 3;
    maxCacheBytes = maxCacheSize * avgRowBytes;
    // With scale 1 positions are byte offsets capped at 2GB; with scale 8 the
    // addressable file size grows proportionally.
    maxDataFileSize = cacheFileScale == 1 ? Integer.MAX_VALUE : (long) Integer.MAX_VALUE * cacheFileScale;
    maxFreeBlocks = 1 << cacheFreeCountScale;
    dataFile = null;
    shadowFile = null;
}
initial external parameters are set here .
32,392
// Closes the cache. write == true performs an orderly close: all dirty rows
// are saved and the file header (lost-block size, free position, saved flag)
// is persisted; write == false is a fast close without saving. An empty file
// (free position never advanced) is deleted along with its backup.
public void close(boolean write) {
    SimpleLog appLog = database.logger.appLog;
    try {
        // Read-only caches have nothing to persist — just close the file.
        if (cacheReadonly) {
            if (dataFile != null) {
                dataFile.close();
                dataFile = null;
            }
            return;
        }
        StopWatch sw = new StopWatch();
        appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close(" + write + ") : start");
        if (write) {
            cache.saveAll();
            Error.printSystemOut("saveAll: " + sw.elapsedTime());
            appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : save data");
            // Persist header metadata only when something actually changed.
            if (fileModified || freeBlocks.isModified()) {
                dataFile.seek(LONG_EMPTY_SIZE);
                dataFile.writeLong(freeBlocks.getLostBlocksSize());
                dataFile.seek(LONG_FREE_POS_POS);
                dataFile.writeLong(fileFreePosition);
                dataFile.seek(FLAGS_POS);
                // Mark the file as cleanly saved (plus row-info if present).
                int flag = BitMap.set(0, FLAG_ISSAVED);
                if (hasRowInfo) {
                    flag = BitMap.set(flag, FLAG_ROWINFO);
                }
                dataFile.writeInt(flag);
                appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : flags");
                // Position at the logical end so physical length matches.
                if (dataFile.length() != fileFreePosition) {
                    dataFile.seek(fileFreePosition);
                }
                appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : seek end");
                Error.printSystemOut("pos and flags: " + sw.elapsedTime());
            }
        }
        if (dataFile != null) {
            dataFile.close();
            appLog.sendLine(SimpleLog.LOG_NORMAL, "DataFileCache.close() : close");
            dataFile = null;
            Error.printSystemOut("close: " + sw.elapsedTime());
        }
        // An untouched file holds no data: remove it and its backup.
        boolean empty = fileFreePosition == INITIAL_FREE_POS;
        if (empty) {
            fa.removeElement(fileName);
            fa.removeElement(backupFileName);
        }
    } catch (Throwable e) {
        appLog.logContext(e, null);
        throw Error.error(ErrorCode.FILE_IO_ERROR, ErrorCode.M_DataFileCache_close, new Object[]{ e, fileName });
    }
}
Parameter write indicates either an orderly close or a fast close without backup .
32,393
// Rewrites all rows to a new file without fragmentation, then swaps the new
// file in, takes a backup, bumps the cache version property, and reopens with
// a fresh cache. No-op for read-only or never-written caches. The step order
// (save, process, close, delete, rename, backup, reopen, fix index roots) is
// load-bearing — do not reorder.
public void defrag() {
    if (cacheReadonly) {
        return;
    }
    // Nothing was ever written — nothing to defragment.
    if (fileFreePosition == INITIAL_FREE_POS) {
        return;
    }
    database.logger.appLog.logContext(SimpleLog.LOG_NORMAL, "start");
    try {
        // Remember the nio mode before closing; file deletion/rename differ.
        boolean wasNio = dataFile.wasNio();
        cache.saveAll();
        DataFileDefrag dfd = new DataFileDefrag(database, this, fileName);
        dfd.process();
        close(false);
        deleteFile(wasNio);
        renameDataFile(wasNio);
        backupFile();
        database.getProperties().setProperty(HsqlDatabaseProperties.hsqldb_cache_version, HsqlDatabaseProperties.THIS_CACHE_VERSION);
        database.getProperties().save();
        // Discard stale cache contents and reopen on the defragmented file.
        cache.clear();
        cache = new Cache(this);
        open(cacheReadonly);
        // Index roots and transaction row ids moved with the rows.
        dfd.updateTableIndexRoots();
        dfd.updateTransactionRowIDs();
    } catch (Throwable e) {
        database.logger.appLog.logContext(e, null);
        if (e instanceof HsqlException) {
            throw (HsqlException) e;
        } else {
            throw new HsqlException(e, Error.getMessage(ErrorCode.GENERAL_IO_ERROR), ErrorCode.GENERAL_IO_ERROR);
        }
    }
    database.logger.appLog.logContext(SimpleLog.LOG_NORMAL, "end");
}
Writes out all the rows to a new file without fragmentation .
32,394
/**
 * Removes a row deleted by DML/DDL from the cache data structures and returns
 * its file space to the free-block list.
 *
 * @param i     file position of the row
 * @param store owning store (unused here; kept for interface compatibility)
 */
public void remove(int i, PersistentStore store) {
    writeLock.lock();
    try {
        final CachedObject released = release(i);
        if (released == null) {
            return;
        }
        freeBlocks.add(i, released.getStorageSize());
    } finally {
        writeLock.unlock();
    }
}
Used when a row is deleted as a result of some DML or DDL statement . Removes the row from the cache data structures . Adds the file space for the row to the list of free positions .
32,395
/**
 * Re-introduces a previously released CachedObject into the cache at its
 * preallocated file position, persisting it immediately when the cache
 * stores rows on insert.
 */
public void restore(CachedObject object) {
    writeLock.lock();
    try {
        final int pos = object.getPos();
        cache.put(pos, object);
        if (storeOnInsert) {
            saveRow(object);
        }
    } finally {
        writeLock.unlock();
    }
}
Reintroduces a CachedObject that had previously been released from the cache, reusing the space preallocated for the object.
32,396
/**
 * Deletes the data file, or for nio-backed databases resets its free-position
 * marker to the initial value. Not applicable to OOo stored-file access.
 */
static void deleteOrResetFreePos(Database database, String filename) {
    database.getFileAccess().removeElement(filename);
    // Stored (OOo) file access: the removal above is all we can do.
    if (database.isStoredFileAccess()) {
        return;
    }
    // Nothing left on disk to reset.
    if (!database.getFileAccess().isStreamElement(filename)) {
        return;
    }
    ScaledRAFile file = null;
    try {
        file = new ScaledRAFile(database, filename, false);
        file.seek(LONG_FREE_POS_POS);
        file.writeLong(INITIAL_FREE_POS);
    } catch (IOException e) {
        database.logger.appLog.logContext(e, null);
    } finally {
        if (file != null) {
            try {
                file.close();
            } catch (IOException e) {
                database.logger.appLog.logContext(e, null);
            }
        }
    }
}
This method deletes a data file or resets its free position. It is used only for nio files, not OOo files.
32,397
/**
 * Reports whether the fragment identified by the plan hash is one of the
 * rebalance / binary-log fragments that must be made durable, since sysproc
 * replay-necessity cannot be distinguished otherwise.
 */
public static boolean isDurableFragment(byte[] planHash) {
    final long fragId = VoltSystemProcedure.hashToFragId(planHash);
    if (fragId == PF_prepBalancePartitions) {
        return true;
    }
    if (fragId == PF_balancePartitions) {
        return true;
    }
    if (fragId == PF_balancePartitionsData) {
        return true;
    }
    if (fragId == PF_balancePartitionsClearIndex) {
        return true;
    }
    if (fragId == PF_distribute) {
        return true;
    }
    return fragId == PF_applyBinaryLog;
}
Used for sysprocs, since we cannot distinguish whether such a fragment needs to be replayed or not.
32,398
/**
 * Stores the response and flags the call as successfully completed, releasing
 * anyone blocked on the latch. Only the first transition out of RUNNING wins;
 * later calls are no-ops.
 */
protected void set(ClientResponse response) {
    final boolean firstCompletion = this.status.compareAndSet(STATUS_RUNNING, STATUS_SUCCESS);
    if (firstCompletion) {
        this.response = response;
        this.latch.countDown();
    }
}
Sets the result of the operation and flag the execution call as completed .
32,399
/**
 * Generates all join-order permutations for a subtree containing only INNER
 * joins: each permutation of the leaf tables is rebuilt into a join tree,
 * then every rebuilt tree gets a clone of the combined WHERE filter and the
 * original subtree's id.
 */
private static List<JoinNode> generateInnerJoinOrdersForTree(JoinNode subTree) {
    final List<JoinNode> leaves = subTree.generateLeafNodesJoinOrder();
    final List<List<JoinNode>> orderings = PermutationGenerator.generatePurmutations(leaves);
    final List<JoinNode> rebuiltTrees = new ArrayList<>();
    for (List<JoinNode> ordering : orderings) {
        rebuiltTrees.add(JoinNode.reconstructJoinTreeFromTableNodes(ordering, JoinType.INNER));
    }
    // Filters are gathered only after all trees are rebuilt, matching the
    // original evaluation order.
    final AbstractExpression combinedWhereExpr = subTree.getAllFilters();
    final List<JoinNode> treePermutations = new ArrayList<>();
    for (JoinNode rebuilt : rebuiltTrees) {
        if (combinedWhereExpr != null) {
            rebuilt.setWhereExpression(combinedWhereExpr.clone());
        }
        rebuilt.setId(subTree.getId());
        treePermutations.add(rebuilt);
    }
    return treePermutations;
}
Helper method to generate join orders for a join tree containing only INNER joins that can be obtained by the permutation of the original tables .