idx int64 0 41.2k | question stringlengths 83 4.15k | target stringlengths 5 715 |
|---|---|---|
30,700 | public void closePhysically ( ) throws SQLException { SQLException exception = null ; if ( ! isClosed && this . connection != null && ! this . connection . isClosed ( ) ) { try { this . connection . close ( ) ; } catch ( SQLException e ) { exception = e ; } } this . isClosed = true ; this . pooledConnection = null ; this . connection = null ; this . connectionDefaults = null ; this . connectionListeners . clear ( ) ; this . connectionListeners = null ; if ( exception != null ) { throw exception ; } } | Closes the connection physically . The pool is not notified of this . |
30,701 | public void startSnapshotWithTargets ( Collection < SnapshotDataTarget > targets , long now ) { for ( SnapshotTableTask t : m_snapshotTableTasks . values ( ) ) { t . getTarget ( ) ; } ArrayList < SnapshotDataTarget > targetsToClose = Lists . newArrayList ( ) ; for ( final SnapshotDataTarget target : targets ) { if ( target . needsFinalClose ( ) ) { targetsToClose . add ( target ) ; } } m_snapshotTargets = targetsToClose ; VoltDB . instance ( ) . schedulePriorityWork ( new Runnable ( ) { public void run ( ) { m_siteTaskerQueue . offer ( new SnapshotTask ( ) ) ; } } , ( m_quietUntil + ( 5 * m_snapshotPriority ) - now ) , 0 , TimeUnit . MILLISECONDS ) ; m_quietUntil += 5 * m_snapshotPriority ; } | This is called from the snapshot IO thread when the deferred setup is finished . It sets the data targets and queues a snapshot task onto the site thread . |
30,702 | private List < BBContainer > getOutputBuffers ( Collection < SnapshotTableTask > tableTasks , boolean noSchedule ) { final int desired = tableTasks . size ( ) ; while ( true ) { int available = m_availableSnapshotBuffers . get ( ) ; if ( desired > available ) { return null ; } if ( m_availableSnapshotBuffers . compareAndSet ( available , available - desired ) ) break ; } List < BBContainer > outputBuffers = new ArrayList < BBContainer > ( tableTasks . size ( ) ) ; for ( int ii = 0 ; ii < tableTasks . size ( ) ; ii ++ ) { final BBContainer origin = DBBPool . allocateDirectAndPool ( m_snapshotBufferLength ) ; outputBuffers . add ( createNewBuffer ( origin , noSchedule ) ) ; } return outputBuffers ; } | Create an output buffer for each task . |
30,703 | public void write ( RowOutputInterface out , ResultMetaData meta ) throws IOException { beforeFirst ( ) ; out . writeLong ( id ) ; out . writeInt ( size ) ; out . writeInt ( 0 ) ; out . writeInt ( size ) ; while ( hasNext ( ) ) { Object [ ] data = getNext ( ) ; out . writeData ( meta . getColumnCount ( ) , meta . columnTypes , data , null , null ) ; } beforeFirst ( ) ; } | reading and writing |
30,704 | public static ClientInterface create ( HostMessenger messenger , CatalogContext context , ReplicationRole replicationRole , Cartographer cartographer , InetAddress clientIntf , int clientPort , InetAddress adminIntf , int adminPort , SslContext SslContext ) throws Exception { final ClientInterface ci = new ClientInterface ( clientIntf , clientPort , adminIntf , adminPort , context , messenger , replicationRole , cartographer , SslContext ) ; return ci ; } | Static factory method to easily create a ClientInterface with the default settings . |
30,705 | public void initializeSnapshotDaemon ( HostMessenger messenger , GlobalServiceElector gse ) { m_snapshotDaemon . init ( this , messenger , new Runnable ( ) { public void run ( ) { bindAdapter ( m_snapshotDaemonAdapter , null ) ; } } , gse ) ; } | Initializes the snapshot daemon so that it s ready to take snapshots |
30,706 | public ClientInterfaceHandleManager bindAdapter ( final Connection adapter , final ClientInterfaceRepairCallback repairCallback ) { return bindAdapter ( adapter , repairCallback , false ) ; } | Tell the clientInterface about a connection adapter . |
30,707 | public void mayActivateSnapshotDaemon ( ) { SnapshotSchedule schedule = m_catalogContext . get ( ) . database . getSnapshotschedule ( ) . get ( "default" ) ; if ( schedule != null ) { final ListenableFuture < Void > future = m_snapshotDaemon . mayGoActiveOrInactive ( schedule ) ; future . addListener ( new Runnable ( ) { public void run ( ) { try { future . get ( ) ; } catch ( InterruptedException e ) { VoltDB . crashLocalVoltDB ( "Failed to make SnapshotDaemon active" , false , e ) ; } catch ( ExecutionException e ) { VoltDB . crashLocalVoltDB ( "Failed to make SnapshotDaemon active" , false , e ) ; } } } , CoreUtils . SAMETHREADEXECUTOR ) ; } } | in the cluster make our SnapshotDaemon responsible for snapshots |
30,708 | public void notifyOfCatalogUpdate ( ) { m_catalogContext . set ( VoltDB . instance ( ) . getCatalogContext ( ) ) ; if ( VoltDB . instance ( ) . getMode ( ) != OperationMode . INITIALIZING ) { mayActivateSnapshotDaemon ( ) ; StoredProcedureInvocation spi = new StoredProcedureInvocation ( ) ; spi . setProcName ( "@SystemCatalog" ) ; spi . setParams ( "PROCEDURES" ) ; spi . setClientHandle ( ASYNC_PROC_HANDLE ) ; notifyClients ( m_currentProcValues , m_currentProcSupplier , spi , OpsSelector . SYSTEMCATALOG ) ; } } | Set the flag that tells this client interface to update its catalog when it s threadsafe . |
30,709 | private final void checkForDeadConnections ( final long now ) { final ArrayList < Pair < Connection , Integer > > connectionsToRemove = new ArrayList < Pair < Connection , Integer > > ( ) ; for ( final ClientInterfaceHandleManager cihm : m_cihm . values ( ) ) { if ( VoltPort . class == cihm . connection . getClass ( ) ) { final int delta = cihm . connection . writeStream ( ) . calculatePendingWriteDelta ( now ) ; if ( delta > CLIENT_HANGUP_TIMEOUT ) { connectionsToRemove . add ( Pair . of ( cihm . connection , delta ) ) ; } } } for ( final Pair < Connection , Integer > p : connectionsToRemove ) { Connection c = p . getFirst ( ) ; networkLog . warn ( "Closing connection to " + c + " because it hasn't read a response that was pending for " + p . getSecond ( ) + " milliseconds" ) ; c . unregister ( ) ; } } | Check for dead connections by providing each connection with the current time so it can calculate the delta between now and the time the oldest message was queued for sending . |
30,710 | protected void shutdown ( ) throws InterruptedException { if ( m_deadConnectionFuture != null ) { m_deadConnectionFuture . cancel ( false ) ; try { m_deadConnectionFuture . get ( ) ; } catch ( Throwable t ) { } } if ( m_topologyCheckFuture != null ) { m_topologyCheckFuture . cancel ( false ) ; try { m_topologyCheckFuture . get ( ) ; } catch ( Throwable t ) { } } if ( m_maxConnectionUpdater != null ) { m_maxConnectionUpdater . cancel ( false ) ; } if ( m_acceptor != null ) { m_acceptor . shutdown ( ) ; } if ( m_adminAcceptor != null ) { m_adminAcceptor . shutdown ( ) ; } if ( m_snapshotDaemon != null ) { m_snapshotDaemon . shutdown ( ) ; } if ( m_migratePartitionLeaderExecutor != null ) { m_migratePartitionLeaderExecutor . shutdown ( ) ; } m_notifier . shutdown ( ) ; } | all your read buffers events .. or something .. |
30,711 | public void sendEOLMessage ( int partitionId ) { final long initiatorHSId = m_cartographer . getHSIdForMaster ( partitionId ) ; Iv2EndOfLogMessage message = new Iv2EndOfLogMessage ( partitionId ) ; m_mailbox . send ( initiatorHSId , message ) ; } | Sends an end of log message to the master of that partition . This should only be called at the end of replay . |
30,712 | private ClientResponseImpl getMispartitionedErrorResponse ( StoredProcedureInvocation task , Procedure catProc , Exception ex ) { Object invocationParameter = null ; try { invocationParameter = task . getParameterAtIndex ( catProc . getPartitionparameter ( ) ) ; } catch ( Exception ex2 ) { } String exMsg = "Unknown" ; if ( ex != null ) { exMsg = ex . getMessage ( ) ; } String errorMessage = "Error sending procedure " + task . getProcName ( ) + " to the correct partition. Make sure parameter values are correct." + " Parameter value " + invocationParameter + ", partition column " + catProc . getPartitioncolumn ( ) . getName ( ) + " type " + catProc . getPartitioncolumn ( ) . getType ( ) + " Message: " + exMsg ; authLog . warn ( errorMessage ) ; ClientResponseImpl clientResponse = new ClientResponseImpl ( ClientResponse . UNEXPECTED_FAILURE , new VoltTable [ 0 ] , errorMessage , task . clientHandle ) ; return clientResponse ; } | Generate a mispartitioned response also log the message . |
30,713 | public boolean ceaseAllPublicFacingTrafficImmediately ( ) { try { if ( m_acceptor != null ) { m_acceptor . shutdown ( ) ; } if ( m_adminAcceptor != null ) { m_adminAcceptor . shutdown ( ) ; } } catch ( InterruptedException e ) { log . error ( e ) ; return false ; } finally { m_isAcceptingConnections . set ( false ) ; m_cihm . clear ( ) ; } return true ; } | This is not designed to be a safe shutdown . This is designed to stop sending messages to clients as fast as possible . It is currently called from VoltDB . crash ... |
30,714 | void processMigratePartitionLeaderTask ( MigratePartitionLeaderMessage message ) { synchronized ( m_lock ) { if ( message . startMigratingPartitionLeaders ( ) ) { if ( m_migratePartitionLeaderExecutor == null ) { m_migratePartitionLeaderExecutor = Executors . newSingleThreadScheduledExecutor ( CoreUtils . getThreadFactory ( "MigratePartitionLeader" ) ) ; final int interval = Integer . parseInt ( System . getProperty ( "MIGRATE_PARTITION_LEADER_INTERVAL" , "1" ) ) ; final int delay = Integer . parseInt ( System . getProperty ( "MIGRATE_PARTITION_LEADER_DELAY" , "1" ) ) ; m_migratePartitionLeaderExecutor . scheduleAtFixedRate ( ( ) -> startMigratePartitionLeader ( message . isForStopNode ( ) ) , delay , interval , TimeUnit . SECONDS ) ; } hostLog . info ( "MigratePartitionLeader task is started." ) ; return ; } if ( m_migratePartitionLeaderExecutor != null ) { m_migratePartitionLeaderExecutor . shutdown ( ) ; m_migratePartitionLeaderExecutor = null ; } } hostLog . info ( "MigratePartitionLeader task is stopped." ) ; } | start or stop MigratePartitionLeader task |
30,715 | public VoltTable [ ] run ( SystemProcedureExecutionContext ctx ) { VoltDBInterface voltdb = VoltDB . instance ( ) ; OperationMode opMode = voltdb . getMode ( ) ; if ( ctx . isLowestSiteId ( ) ) { ZooKeeper zk = voltdb . getHostMessenger ( ) . getZK ( ) ; try { Stat stat ; OperationMode zkMode = null ; Code code ; do { stat = new Stat ( ) ; code = Code . BADVERSION ; try { byte [ ] data = zk . getData ( VoltZK . operationMode , false , stat ) ; zkMode = data == null ? opMode : OperationMode . valueOf ( data ) ; if ( zkMode == RUNNING ) { break ; } stat = zk . setData ( VoltZK . operationMode , RUNNING . getBytes ( ) , stat . getVersion ( ) ) ; code = Code . OK ; zkMode = RUNNING ; break ; } catch ( BadVersionException ex ) { code = ex . code ( ) ; } } while ( zkMode != RUNNING && code == Code . BADVERSION ) ; m_stat = stat ; voltdb . getHostMessenger ( ) . unpause ( ) ; voltdb . setMode ( RUNNING ) ; SnmpTrapSender snmp = voltdb . getSnmpTrapSender ( ) ; if ( snmp != null ) { snmp . resume ( "Cluster resumed." ) ; } } catch ( Exception e ) { throw new RuntimeException ( e ) ; } } VoltTable t = new VoltTable ( VoltSystemProcedure . STATUS_SCHEMA ) ; t . addRow ( VoltSystemProcedure . STATUS_OK ) ; return new VoltTable [ ] { t } ; } | Exit admin mode |
30,716 | private static File getNativeLibraryFile ( String libname ) { String pathFormat = "/org/voltdb/native/%s/x86_64" ; String libPath = null ; String osName = System . getProperty ( "os.name" ) . toLowerCase ( ) ; if ( osName . contains ( "mac" ) ) { libPath = String . format ( pathFormat , "Mac" ) ; } else if ( osName . contains ( "linux" ) ) { libPath = String . format ( pathFormat , "Linux" ) ; } else { throw new RuntimeException ( "Unsupported system: " + osName ) ; } String libFileName = System . mapLibraryName ( libname ) ; if ( NativeLibraryLoader . class . getResource ( libPath + "/" + libFileName ) == null ) { if ( osName . contains ( "mac" ) ) { libFileName = "lib" + libname + ".jnilib" ; } if ( NativeLibraryLoader . class . getResource ( libPath + "/" + libFileName ) == null ) { String msg = "Could not find library resource using path: " + libPath + "/" + libFileName ; s_hostLog . warn ( msg ) ; throw new RuntimeException ( msg ) ; } } File tmpFilePath = new File ( System . getProperty ( VOLT_TMP_DIR , System . getProperty ( "java.io.tmpdir" ) ) ) ; if ( s_hostLog . isDebugEnabled ( ) ) { s_hostLog . debug ( "Temp directory to which shared libs are extracted is: " + tmpFilePath . getAbsolutePath ( ) ) ; } try { return loadLibraryFile ( libPath , libFileName , tmpFilePath . getAbsolutePath ( ) ) ; } catch ( IOException e ) { s_hostLog . error ( "Error loading Volt library file from jar" , e ) ; throw new RuntimeException ( e ) ; } } | Returns the native library file copied into a readable location . |
30,717 | public static void writeString ( String value , ByteBuffer buf ) { if ( value == null ) { buf . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } byte [ ] strbytes = value . getBytes ( Constants . UTF8ENCODING ) ; int len = strbytes . length ; buf . putInt ( len ) ; buf . put ( strbytes ) ; } | Write a string in the standard VoltDB way |
30,718 | public static void writeVarbinary ( byte [ ] bytes , ByteBuffer buf ) throws IOException { if ( bytes == null ) { buf . putInt ( VoltType . NULL_STRING_LENGTH ) ; return ; } buf . putInt ( bytes . length ) ; buf . put ( bytes ) ; } | Write a set of bytes in the standard VoltDB way |
30,719 | private static long getMaxBidId ( Client client ) { long currentMaxBidId = 0 ; try { VoltTable vt = client . callProcedure ( "@AdHoc" , "select max(id) from bids" ) . getResults ( ) [ 0 ] ; vt . advanceRow ( ) ; currentMaxBidId = vt . getLong ( 0 ) ; if ( vt . wasNull ( ) ) { currentMaxBidId = 0 ; } } catch ( IOException | ProcCallException e ) { e . printStackTrace ( ) ; } return currentMaxBidId ; } | Find the current highest bid id in the bids table . We ll start generating new bids at this number plus one . |
30,720 | public void run ( ) { long bidId = m_bidId ++ ; long advertiserId = Math . abs ( m_rand . nextLong ( ) ) % NUM_ADVERTISERS ; GeographyValue bidRegion = Regions . pickRandomRegion ( ) ; TimestampType bidStartTime = new TimestampType ( ) ; TimestampType bidEndTime = new TimestampType ( bidStartTime . getTime ( ) + AdBrokerBenchmark . BID_DURATION_SECONDS * 1000000 ) ; double amount = 0.00001 + 0.01 * m_rand . nextDouble ( ) ; DecimalFormat df = new DecimalFormat ( "#.####" ) ; amount = Double . valueOf ( df . format ( amount ) ) ; try { m_client . callProcedure ( new NullCallback ( ) , "bids.Insert" , bidId , advertiserId , bidRegion , bidStartTime , bidEndTime , amount ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } } | This is the run method for this Runnable subclass . |
30,721 | String getName ( ) { int idx = mName . lastIndexOf ( '/' ) ; return ( idx > 0 ) ? mName . substring ( idx ) : mName ; } | Strip the other pathname components and return the basename |
30,722 | public AbstractExpression singlePartitioningExpression ( ) { AbstractExpression e = singlePartitioningExpressionForReport ( ) ; if ( e != null && isUsefulPartitioningExpression ( e ) ) { return e ; } return null ; } | smart accessor - only returns a value if it was unique and is useful |
30,723 | void analyzeTablePartitioning ( Collection < StmtTableScan > collection ) throws PlanningErrorException { m_countOfPartitionedTables = 0 ; for ( StmtTableScan tableScan : collection ) { if ( ! tableScan . getIsReplicated ( ) ) { ++ m_countOfPartitionedTables ; } } m_countOfIndependentlyPartitionedTables = m_countOfPartitionedTables ; } | This simple analysis counts the number of partitioned tables in the join tree of a query and initializes a guess for the count of independently partitioned tables . |
30,724 | public void resetAnalysisState ( ) { m_countOfIndependentlyPartitionedTables = - 1 ; m_countOfPartitionedTables = - 1 ; m_fullColumnName = null ; m_inferredExpression . clear ( ) ; m_inferredParameterIndex = - 1 ; m_inferredValue = null ; m_isDML = false ; setJoinValid ( true ) ; setJoinInvalidReason ( null ) ; m_partitionColForDML = null ; } | Sometimes when we fail to plan a statement we try again with different inputs using the same StatementPartitioning object . In this case it s incumbent on callers to reset the cached analysis state set by calling this method . |
30,725 | public boolean callProcedure ( Invocation invocation , ProcedureCallback callback ) { try { boolean result = m_importServerAdapter . callProcedure ( this , m_backPressurePredicate , callback , invocation . getProcedure ( ) , invocation . getParams ( ) ) ; reportStat ( result , invocation . getProcedure ( ) ) ; return result ; } catch ( Exception ex ) { rateLimitedLog ( Level . ERROR , ex , "%s: Error trying to import" , getName ( ) ) ; reportFailureStat ( invocation . getProcedure ( ) ) ; return false ; } } | This should be used importer implementations to execute a stored procedure . |
30,726 | public void rateLimitedLog ( Level level , Throwable cause , String format , Object ... args ) { m_logger . rateLimitedLog ( LOG_SUPPRESSION_INTERVAL_SECONDS , level , cause , format , args ) ; } | This rate limited log must be used by the importers to log messages that may happen frequently and must be rate limited . |
30,727 | protected void trace ( Throwable t , String msgFormat , Object ... args ) { m_logger . trace ( String . format ( msgFormat , args ) , t ) ; } | Log a TRACE level log message . |
30,728 | public void warn ( Throwable t , String msgFormat , Object ... args ) { m_logger . warn ( String . format ( msgFormat , args ) , t ) ; } | Log a WARN level log message . |
30,729 | public void add ( Right right ) { if ( isFull ) { return ; } if ( right . isFull ) { clear ( ) ; isFull = true ; return ; } isFullSelect |= right . isFullSelect ; isFullInsert |= right . isFullInsert ; isFullUpdate |= right . isFullUpdate ; isFullReferences |= right . isFullReferences ; isFullDelete |= right . isFullDelete ; if ( isFullSelect ) { selectColumnSet = null ; } else if ( right . selectColumnSet != null ) { if ( selectColumnSet == null ) { selectColumnSet = new OrderedHashSet ( ) ; } selectColumnSet . addAll ( right . selectColumnSet ) ; } if ( isFullInsert ) { insertColumnSet = null ; } else if ( right . insertColumnSet != null ) { if ( insertColumnSet == null ) { insertColumnSet = new OrderedHashSet ( ) ; } insertColumnSet . addAll ( right . insertColumnSet ) ; } if ( isFullUpdate ) { updateColumnSet = null ; } else if ( right . updateColumnSet != null ) { if ( updateColumnSet == null ) { updateColumnSet = new OrderedHashSet ( ) ; } updateColumnSet . addAll ( right . updateColumnSet ) ; } if ( isFullReferences ) { referencesColumnSet = null ; } else if ( right . referencesColumnSet != null ) { if ( referencesColumnSet == null ) { referencesColumnSet = new OrderedHashSet ( ) ; } referencesColumnSet . addAll ( right . referencesColumnSet ) ; } if ( isFullTrigger ) { triggerColumnSet = null ; } else if ( right . triggerColumnSet != null ) { if ( triggerColumnSet == null ) { triggerColumnSet = new OrderedHashSet ( ) ; } triggerColumnSet . addAll ( right . triggerColumnSet ) ; } } | Supports column level GRANT |
30,730 | public void remove ( SchemaObject object , Right right ) { if ( right . isFull ) { clear ( ) ; return ; } if ( isFull ) { isFull = false ; isFullSelect = isFullInsert = isFullUpdate = isFullReferences = isFullDelete = true ; } if ( right . isFullDelete ) { isFullDelete = false ; } if ( ! isFullSelect && selectColumnSet == null ) { } else if ( right . isFullSelect ) { isFullSelect = false ; selectColumnSet = null ; } else if ( right . selectColumnSet != null ) { if ( isFullSelect ) { isFullSelect = false ; selectColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } selectColumnSet . removeAll ( right . selectColumnSet ) ; if ( selectColumnSet . isEmpty ( ) ) { selectColumnSet = null ; } } if ( ! isFullInsert && insertColumnSet == null ) { } else if ( right . isFullInsert ) { isFullInsert = false ; insertColumnSet = null ; } else if ( right . insertColumnSet != null ) { if ( isFullInsert ) { isFullInsert = false ; insertColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } insertColumnSet . removeAll ( right . insertColumnSet ) ; if ( insertColumnSet . isEmpty ( ) ) { insertColumnSet = null ; } } if ( ! isFullUpdate && updateColumnSet == null ) { } else if ( right . isFullUpdate ) { isFullUpdate = false ; updateColumnSet = null ; } else if ( right . updateColumnSet != null ) { if ( isFullUpdate ) { isFullUpdate = false ; updateColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } updateColumnSet . removeAll ( right . updateColumnSet ) ; if ( updateColumnSet . isEmpty ( ) ) { updateColumnSet = null ; } } if ( ! isFullReferences && referencesColumnSet == null ) { } else if ( right . isFullReferences ) { isFullReferences = false ; referencesColumnSet = null ; } else if ( right . referencesColumnSet != null ) { if ( isFullReferences ) { isFullReferences = false ; referencesColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } referencesColumnSet . removeAll ( right . referencesColumnSet ) ; if ( referencesColumnSet . 
isEmpty ( ) ) { referencesColumnSet = null ; } } if ( ! isFullTrigger && triggerColumnSet == null ) { } else if ( right . isFullTrigger ) { isFullTrigger = false ; triggerColumnSet = null ; } else if ( right . triggerColumnSet != null ) { if ( isFullTrigger ) { isFullTrigger = false ; triggerColumnSet = ( ( Table ) object ) . getColumnNameSet ( ) ; } triggerColumnSet . removeAll ( right . triggerColumnSet ) ; if ( triggerColumnSet . isEmpty ( ) ) { triggerColumnSet = null ; } } } | supports column level REVOKE |
30,731 | static boolean containsAllColumns ( OrderedHashSet columnSet , Table table , boolean [ ] columnCheckList ) { for ( int i = 0 ; i < columnCheckList . length ; i ++ ) { if ( columnCheckList [ i ] ) { if ( columnSet == null ) { return false ; } if ( columnSet . contains ( table . getColumn ( i ) . getName ( ) ) ) { continue ; } return false ; } } return true ; } | Supports column level checks |
30,732 | String getTableRightsSQL ( Table table ) { StringBuffer sb = new StringBuffer ( ) ; if ( isFull ) { return Tokens . T_ALL ; } if ( isFullSelect ) { sb . append ( Tokens . T_SELECT ) ; sb . append ( ',' ) ; } else if ( selectColumnSet != null ) { sb . append ( Tokens . T_SELECT ) ; getColumnList ( table , selectColumnSet , sb ) ; sb . append ( ',' ) ; } if ( isFullInsert ) { sb . append ( Tokens . T_INSERT ) ; sb . append ( ',' ) ; } else if ( insertColumnSet != null ) { sb . append ( Tokens . T_INSERT ) ; getColumnList ( table , insertColumnSet , sb ) ; sb . append ( ',' ) ; } if ( isFullUpdate ) { sb . append ( Tokens . T_UPDATE ) ; sb . append ( ',' ) ; } else if ( updateColumnSet != null ) { sb . append ( Tokens . T_UPDATE ) ; getColumnList ( table , updateColumnSet , sb ) ; sb . append ( ',' ) ; } if ( isFullDelete ) { sb . append ( Tokens . T_DELETE ) ; sb . append ( ',' ) ; } if ( isFullReferences ) { sb . append ( Tokens . T_REFERENCES ) ; sb . append ( ',' ) ; } else if ( referencesColumnSet != null ) { sb . append ( Tokens . T_REFERENCES ) ; sb . append ( ',' ) ; } if ( isFullTrigger ) { sb . append ( Tokens . T_TRIGGER ) ; sb . append ( ',' ) ; } else if ( triggerColumnSet != null ) { sb . append ( Tokens . T_TRIGGER ) ; sb . append ( ',' ) ; } return sb . toString ( ) . substring ( 0 , sb . length ( ) - 1 ) ; } | supports column level GRANT |
30,733 | public synchronized int next ( ) { while ( nextPort <= MAX_STATIC_PORT ) { int port = nextPort ++ ; if ( MiscUtils . isBindable ( port ) ) { return port ; } } throw new RuntimeException ( "Exhausted all possible ports" ) ; } | Return the next bindable port |
30,734 | static Range < Long > range ( long start , long end ) { return Range . closed ( start , end ) . canonical ( DiscreteDomain . longs ( ) ) ; } | Returns a canonical range that can be added to the internal range set . Only ranges returned by this method can be added to the range set otherwise range operations like contains may yield unexpected results . Consult the Guava doc on Range for details . |
30,735 | private static long start ( Range < Long > range ) { if ( range . lowerBoundType ( ) == BoundType . OPEN ) { return DiscreteDomain . longs ( ) . next ( range . lowerEndpoint ( ) ) ; } else { return range . lowerEndpoint ( ) ; } } | Get the start of the range . Always use this method to get the start of a range because it respects the bound type . |
30,736 | private static long end ( Range < Long > range ) { if ( range . upperBoundType ( ) == BoundType . OPEN ) { return DiscreteDomain . longs ( ) . previous ( range . upperEndpoint ( ) ) ; } else { return range . upperEndpoint ( ) ; } } | Get the end of the range . Always use this method to get the end of a range because it respects the bound type . |
30,737 | public void append ( long start , long end ) { assert ( start <= end && ( m_map . isEmpty ( ) || start > end ( m_map . span ( ) ) ) ) ; addRange ( start , end ) ; } | Appends a range to the tracker . The range has to be after the last sequence number of the tracker . |
30,738 | public int truncate ( long newTruncationPoint ) { int truncated = 0 ; if ( m_map . isEmpty ( ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; return truncated ; } if ( newTruncationPoint < getFirstSeqNo ( ) ) { return truncated ; } if ( m_hasSentinel ) { truncated -= 1 ; } final Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { final Range < Long > next = iter . next ( ) ; if ( end ( next ) < newTruncationPoint ) { truncated += end ( next ) - start ( next ) + 1 ; iter . remove ( ) ; } else if ( next . contains ( newTruncationPoint ) ) { truncated += newTruncationPoint - start ( next ) + 1 ; iter . remove ( ) ; m_map . add ( range ( newTruncationPoint , end ( next ) ) ) ; m_hasSentinel = true ; return truncated ; } else { break ; } } if ( ! m_map . contains ( newTruncationPoint ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; } return truncated ; } | Truncate the tracker to the given safe point . After truncation the new safe point will be the first sequence number of the tracker . If the new safe point is before the first sequence number of the tracker it s a no - op . If the map is empty truncation point will be the new safe point of tracker . |
30,739 | public void truncateAfter ( long newTruncationPoint ) { if ( size ( ) == 0 ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; return ; } if ( newTruncationPoint > getLastSeqNo ( ) ) { return ; } final Iterator < Range < Long > > iter = m_map . asDescendingSetOfRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { final Range < Long > next = iter . next ( ) ; if ( start ( next ) > newTruncationPoint ) { iter . remove ( ) ; } else if ( next . contains ( newTruncationPoint ) ) { iter . remove ( ) ; m_map . add ( range ( start ( next ) , newTruncationPoint ) ) ; return ; } else { break ; } } if ( m_map . isEmpty ( ) ) { m_map . add ( range ( newTruncationPoint , newTruncationPoint ) ) ; m_hasSentinel = true ; } } | Truncate the tracker to the given truncation point . After truncation any ranges after the new truncation point will be removed . If the new safe point is after the last sequence number of the tracker it s a no - op . If the map is empty truncation point will be the new safe point of tracker . |
30,740 | public Pair < Long , Long > getRangeContaining ( long seq ) { Range < Long > range = m_map . rangeContaining ( seq ) ; if ( range != null ) { return new Pair < Long , Long > ( start ( range ) , end ( range ) ) ; } return null ; } | Get range that contains given sequence number |
30,741 | public Pair < Long , Long > getFirstGap ( ) { if ( m_map . isEmpty ( ) || size ( ) < 2 ) { return null ; } Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; long start = end ( iter . next ( ) ) + 1 ; long end = start ( iter . next ( ) ) - 1 ; return new Pair < Long , Long > ( start , end ) ; } | Find range of the first gap if it exists . If there is only one entry range after the first entry is NOT a gap . |
30,742 | public int sizeInSequence ( ) { int sequence = 0 ; if ( m_map . isEmpty ( ) ) { return sequence ; } final Iterator < Range < Long > > iter = m_map . asRanges ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Range < Long > range = iter . next ( ) ; sequence += end ( range ) - start ( range ) + 1 ; } if ( m_hasSentinel ) { sequence -= 1 ; } return sequence ; } | Get total number of sequence from the tracker . |
30,743 | void sendEvent ( CallEvent call ) throws NoConnectionsException , IOException , ProcCallException { if ( call . endTS == null ) { assert ( call . startTS != null ) ; client . callProcedure ( new NullCallback ( ) , "BeginCall" , call . agentId , call . phoneNoStr ( ) , call . callId , call . startTS ) ; } else { assert ( call . startTS == null ) ; client . callProcedure ( new NullCallback ( ) , "EndCall" , call . agentId , call . phoneNoStr ( ) , call . callId , call . endTS ) ; } } | Send a call event to either BeginCall or EndCall based on the event . |
30,744 | public void write ( char [ ] c , int off , int len ) { ensureRoom ( len * 2 ) ; for ( int i = off ; i < len ; i ++ ) { int v = c [ i ] ; buffer [ count ++ ] = ( byte ) ( v >>> 8 ) ; buffer [ count ++ ] = ( byte ) v ; } } | additional public methods not in similar java . util classes |
30,745 | long executeSQL ( boolean isFinal ) throws VoltAbortException { long count = 0 ; VoltTable [ ] results = voltExecuteSQL ( isFinal ) ; for ( VoltTable result : results ) { long dmlUpdated = result . asScalarLong ( ) ; if ( dmlUpdated == 0 ) { throw new VoltAbortException ( "Insert failed for tuple." ) ; } if ( dmlUpdated > 1 ) { throw new VoltAbortException ( "Insert modified more than one tuple." ) ; } ++ count ; } return count ; } | Execute a set of queued inserts . Ensure each insert successfully inserts one row . Throw exception if not . |
30,746 | protected static boolean _isCloseSurrogateMethod ( final Class clazz , final Method method ) { return ( ( Connection . class . isAssignableFrom ( clazz ) || Statement . class . isAssignableFrom ( clazz ) ) && "close" . equals ( method . getName ( ) ) ) ; } | Simple test used only during static initialization . |
30,747 | protected static Class [ ] _computeProxiedInterface ( Object delegate ) { if ( delegate instanceof Array ) { return arrayInterface ; } else if ( delegate instanceof Connection ) { return connectionInterface ; } else if ( delegate instanceof CallableStatement ) { return callableStatementInterface ; } else if ( delegate instanceof DatabaseMetaData ) { return databaseMetaDataInterface ; } else if ( delegate instanceof PreparedStatement ) { return preparedStatementInterface ; } else if ( delegate instanceof ResultSet ) { return resultSetInterface ; } else if ( delegate instanceof Statement ) { return statementInterface ; } else { return null ; } } | Given a delegate retrieves the interface that must be implemented by a surrogate dynamic proxy to ensure pooling sensitive methods of the delegate are not exposed directly to clients . |
30,748 | protected void closeConnectionSurrogate ( ) throws Throwable { ConnectionPool connectionPool = this . connectionPool ; if ( connectionPool == null ) { Connection connection = ( Connection ) this . delegate ; try { connection . close ( ) ; } catch ( SQLException ex ) { } } else { Connection connection = ( Connection ) this . delegate ; StatementPool statementPool = this . statementPool ; connectionPool . checkIn ( connection , statementPool ) ; } } | Does work toward enabling reuse of the delegate when it is a Connection . |
30,749 | public static String jsonifyClusterTrackers ( Pair < Long , Long > lastConsumerUniqueIds , Map < Integer , Map < Integer , DRSiteDrIdTracker > > allProducerTrackers ) throws JSONException { JSONStringer stringer = new JSONStringer ( ) ; stringer . object ( ) ; stringer . keySymbolValuePair ( "lastConsumerSpUniqueId" , lastConsumerUniqueIds . getFirst ( ) ) ; stringer . keySymbolValuePair ( "lastConsumerMpUniqueId" , lastConsumerUniqueIds . getSecond ( ) ) ; stringer . key ( "trackers" ) . object ( ) ; if ( allProducerTrackers != null ) { for ( Map . Entry < Integer , Map < Integer , DRSiteDrIdTracker > > clusterTrackers : allProducerTrackers . entrySet ( ) ) { stringer . key ( Integer . toString ( clusterTrackers . getKey ( ) ) ) . object ( ) ; for ( Map . Entry < Integer , DRSiteDrIdTracker > e : clusterTrackers . getValue ( ) . entrySet ( ) ) { stringer . key ( e . getKey ( ) . toString ( ) ) ; stringer . value ( e . getValue ( ) . toJSON ( ) ) ; } stringer . endObject ( ) ; } } stringer . endObject ( ) ; stringer . endObject ( ) ; return stringer . toString ( ) ; } | Serialize the cluster trackers into JSON . |
30,750 | public static Map < Integer , Map < Integer , DRSiteDrIdTracker > > dejsonifyClusterTrackers ( final String jsonData , boolean resetLastReceivedLogIds ) throws JSONException { Map < Integer , Map < Integer , DRSiteDrIdTracker > > producerTrackers = new HashMap < > ( ) ; JSONObject clusterData = new JSONObject ( jsonData ) ; final JSONObject trackers = clusterData . getJSONObject ( "trackers" ) ; Iterator < String > clusterIdKeys = trackers . keys ( ) ; while ( clusterIdKeys . hasNext ( ) ) { final String clusterIdStr = clusterIdKeys . next ( ) ; final int clusterId = Integer . parseInt ( clusterIdStr ) ; final JSONObject trackerData = trackers . getJSONObject ( clusterIdStr ) ; Iterator < String > srcPidKeys = trackerData . keys ( ) ; while ( srcPidKeys . hasNext ( ) ) { final String srcPidStr = srcPidKeys . next ( ) ; final int srcPid = Integer . valueOf ( srcPidStr ) ; final JSONObject ids = trackerData . getJSONObject ( srcPidStr ) ; final DRSiteDrIdTracker tracker = new DRSiteDrIdTracker ( ids , resetLastReceivedLogIds ) ; Map < Integer , DRSiteDrIdTracker > clusterTrackers = producerTrackers . computeIfAbsent ( clusterId , k -> new HashMap < > ( ) ) ; clusterTrackers . put ( srcPid , tracker ) ; } } return producerTrackers ; } | Deserialize the trackers retrieved from each consumer partitions . |
30,751 | public static void mergeTrackers ( Map < Integer , Map < Integer , DRSiteDrIdTracker > > base , Map < Integer , Map < Integer , DRSiteDrIdTracker > > add ) { for ( Map . Entry < Integer , Map < Integer , DRSiteDrIdTracker > > clusterEntry : add . entrySet ( ) ) { final Map < Integer , DRSiteDrIdTracker > baseClusterEntry = base . get ( clusterEntry . getKey ( ) ) ; if ( baseClusterEntry == null ) { base . put ( clusterEntry . getKey ( ) , clusterEntry . getValue ( ) ) ; } else { for ( Map . Entry < Integer , DRSiteDrIdTracker > partitionEntry : clusterEntry . getValue ( ) . entrySet ( ) ) { final DRConsumerDrIdTracker basePartitionTracker = baseClusterEntry . get ( partitionEntry . getKey ( ) ) ; if ( basePartitionTracker == null ) { baseClusterEntry . put ( partitionEntry . getKey ( ) , partitionEntry . getValue ( ) ) ; } else { basePartitionTracker . mergeTracker ( partitionEntry . getValue ( ) ) ; } } } } } | Merge trackers in the additional map into the base map . |
30,752 | public JSONWriter array ( Iterable < ? extends JSONString > iter ) throws JSONException { array ( ) ; for ( JSONString element : iter ) { value ( element ) ; } endArray ( ) ; return this ; } | Append an array value based on a custom JSONString implementation . |
30,753 | public JSONWriter keySymbolValuePair ( String aKey , String aValue ) throws JSONException { assert ( aKey != null ) ; assert ( m_mode == 'k' ) ; assert ( m_scopeStack [ m_top ] . add ( aKey ) ) ; try { m_writer . write ( m_expectingComma ? ",\"" : "\"" ) ; m_writer . write ( aKey ) ; if ( aValue == null ) { m_writer . write ( "\":null" ) ; } else { m_writer . write ( "\":\"" ) ; m_writer . write ( JSONObject . quotable ( aValue ) ) ; m_writer . write ( '"' ) ; } } catch ( IOException e ) { throw new JSONException ( e ) ; } m_expectingComma = true ; return this ; } | Write a JSON key - value pair in one optimized step that assumes that the key is a symbol composed of normal characters requiring no escaping and asserts that keys are non - null and unique within an object ONLY if asserts are enabled . This method is most suitable in the common case where the caller is making a hard - coded series of calls with the same hard - coded strings for keys . Any sequencing errors can be detected in debug runs with asserts enabled . |
30,754 | public static byte [ ] gunzipBytes ( byte [ ] compressedBytes ) throws IOException { ByteArrayOutputStream bos = new ByteArrayOutputStream ( ( int ) ( compressedBytes . length * 1.5 ) ) ; InflaterOutputStream dos = new InflaterOutputStream ( bos ) ; dos . write ( compressedBytes ) ; dos . close ( ) ; return bos . toByteArray ( ) ; } | Decompresses the given compressed bytes ; duplicated here to avoid linking extra compression utilities into the client code .
30,755 | public Object [ ] getGroupData ( Object [ ] data ) { if ( isSimpleAggregate ) { if ( simpleAggregateData == null ) { simpleAggregateData = data ; return null ; } return simpleAggregateData ; } RowIterator it = groupIndex . findFirstRow ( session , store , data ) ; if ( it . hasNext ( ) ) { Row row = it . getNextRow ( ) ; if ( isAggregate ) { row . setChanged ( ) ; } return row . getData ( ) ; } return null ; } | Special case for isSimpleAggregate cannot use index lookup . |
30,756 | public void addConstraint ( Constraint c ) { int index = c . getConstraintType ( ) == Constraint . PRIMARY_KEY ? 0 : constraintList . length ; constraintList = ( Constraint [ ] ) ArrayUtil . toAdjustedArray ( constraintList , c , index , 1 ) ; updateConstraintLists ( ) ; } | Adds a constraint . |
30,757 | Constraint getUniqueConstraintForColumns ( int [ ] cols ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . isUniqueWithColumns ( cols ) ) { return c ; } } return null ; } | Returns the UNIQUE or PK constraint with the given column signature . |
30,758 | Constraint getUniqueConstraintForColumns ( int [ ] mainTableCols , int [ ] refTableCols ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . hasExprs ( ) ) { continue ; } int type = c . getConstraintType ( ) ; if ( type != Constraint . UNIQUE && type != Constraint . PRIMARY_KEY ) { continue ; } int [ ] constraintCols = c . getMainColumns ( ) ; if ( constraintCols . length != mainTableCols . length ) { continue ; } if ( ArrayUtil . areEqual ( constraintCols , mainTableCols , mainTableCols . length , true ) ) { return c ; } if ( ArrayUtil . areEqualSets ( constraintCols , mainTableCols ) ) { int [ ] newRefTableCols = new int [ mainTableCols . length ] ; for ( int j = 0 ; j < mainTableCols . length ; j ++ ) { int pos = ArrayUtil . find ( constraintCols , mainTableCols [ j ] ) ; newRefTableCols [ pos ] = refTableCols [ j ] ; } for ( int j = 0 ; j < mainTableCols . length ; j ++ ) { refTableCols [ j ] = newRefTableCols [ j ] ; } return c ; } } return null ; } | Returns the UNIQUE or PK constraint with the given column signature . Modifies the composition of refTableCols if necessary . |
30,759 | Constraint getFKConstraintForColumns ( Table tableMain , int [ ] mainCols , int [ ] refCols ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . isEquivalent ( tableMain , mainCols , this , refCols ) ) { return c ; } } return null ; } | Returns any foreign key constraint equivalent to the column sets |
30,760 | public Constraint getUniqueOrPKConstraintForIndex ( Index index ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . getMainIndex ( ) == index && ( c . getConstraintType ( ) == Constraint . UNIQUE || c . getConstraintType ( ) == Constraint . PRIMARY_KEY ) ) { return c ; } } return null ; } | Returns any unique Constraint using this index |
30,761 | int getNextConstraintIndex ( int from , int type ) { for ( int i = from , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . getConstraintType ( ) == type ) { return i ; } } return - 1 ; } | Returns the next constraint of a given type |
30,762 | public void addColumn ( ColumnSchema column ) { String name = column . getName ( ) . name ; if ( findColumn ( name ) >= 0 ) { throw Error . error ( ErrorCode . X_42504 , name ) ; } if ( column . isIdentity ( ) ) { if ( identityColumn != - 1 ) { throw Error . error ( ErrorCode . X_42525 , name ) ; } identityColumn = getColumnCount ( ) ; identitySequence = column . getIdentitySequence ( ) ; } addColumnNoCheck ( column ) ; } | Performs the table level checks and adds a column to the table at the DDL level . Only used at table creation not at alter column . |
30,763 | void checkColumnsMatch ( int [ ] col , Table other , int [ ] othercol ) { for ( int i = 0 ; i < col . length ; i ++ ) { Type type = colTypes [ col [ i ] ] ; Type otherType = other . colTypes [ othercol [ i ] ] ; if ( type . typeComparisonGroup != otherType . typeComparisonGroup ) { throw Error . error ( ErrorCode . X_42562 ) ; } } } | Match two valid equal length columns arrays for type of columns |
30,764 | OrderedHashSet getDependentConstraints ( int colIndex ) { OrderedHashSet set = new OrderedHashSet ( ) ; for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . hasColumnOnly ( colIndex ) ) { set . add ( c ) ; } } return set ; } | Returns list of constraints dependent only on one column |
30,765 | OrderedHashSet getContainingConstraints ( int colIndex ) { OrderedHashSet set = new OrderedHashSet ( ) ; for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . hasColumnPlus ( colIndex ) ) { set . add ( c ) ; } } return set ; } | Returns list of constraints dependent on more than one column |
30,766 | OrderedHashSet getDependentConstraints ( Constraint constraint ) { OrderedHashSet set = new OrderedHashSet ( ) ; for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . getConstraintType ( ) == Constraint . MAIN ) { if ( c . core . uniqueName == constraint . getName ( ) ) { set . add ( c ) ; } } } return set ; } | Returns list of MAIN constraints dependent on this PK or UNIQUE constraint |
30,767 | void checkColumnInFKConstraint ( int colIndex , int actionType ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { Constraint c = constraintList [ i ] ; if ( c . getConstraintType ( ) == Constraint . FOREIGN_KEY && c . hasColumn ( colIndex ) && ( actionType == c . getUpdateAction ( ) || actionType == c . getDeleteAction ( ) ) ) { HsqlName name = c . getName ( ) ; throw Error . error ( ErrorCode . X_42533 , name . getSchemaQualifiedStatementName ( ) ) ; } } } | Used for column defaults and nullability . Checks whether column is in an FK with a given referential action type . |
30,768 | public int getColumnIndex ( String name ) { int i = findColumn ( name ) ; if ( i == - 1 ) { throw Error . error ( ErrorCode . X_42501 , name ) ; } return i ; } | Returns the index of given column name or throws if not found |
30,769 | void setDefaultExpression ( int columnIndex , Expression def ) { ColumnSchema column = getColumn ( columnIndex ) ; column . setDefaultExpression ( def ) ; setColumnTypeVars ( columnIndex ) ; } | Sets the SQL default value for a column .
30,770 | void resetDefaultsFlag ( ) { hasDefaultValues = false ; for ( int i = 0 ; i < colDefaults . length ; i ++ ) { hasDefaultValues = hasDefaultValues || colDefaults [ i ] != null ; } } | sets the flag for the presence of any default expression |
30,771 | Index getIndexForColumn ( int col ) { int i = bestIndexForColumn [ col ] ; return i == - 1 ? null : this . indexList [ i ] ; } | Finds an existing index for a column |
30,772 | public void createPrimaryKey ( HsqlName indexName , int [ ] columns , boolean columnsNotNull ) { if ( primaryKeyCols != null ) { throw Error . runtimeError ( ErrorCode . U_S0500 , "Table" ) ; } if ( columns == null ) { columns = ValuePool . emptyIntArray ; } else { for ( int i = 0 ; i < columns . length ; i ++ ) { getColumn ( columns [ i ] ) . setPrimaryKey ( true ) ; } } primaryKeyCols = columns ; setColumnStructures ( ) ; primaryKeyTypes = new Type [ primaryKeyCols . length ] ; ArrayUtil . projectRow ( colTypes , primaryKeyCols , primaryKeyTypes ) ; primaryKeyColsSequence = new int [ primaryKeyCols . length ] ; ArrayUtil . fillSequence ( primaryKeyColsSequence ) ; HsqlName name = indexName ; if ( name == null ) { name = database . nameManager . newAutoName ( "IDX" , getSchemaName ( ) , getName ( ) , SchemaObject . INDEX ) ; } createPrimaryIndex ( primaryKeyCols , primaryKeyTypes , name ) ; setBestRowIdentifiers ( ) ; } | Creates a single or multi - column primary key and index . sets the colTypes array . Finalises the creation of the table . ( fredt |
30,773 | void addTrigger ( TriggerDef td , HsqlName otherName ) { int index = triggerList . length ; if ( otherName != null ) { int pos = getTriggerIndex ( otherName . name ) ; if ( pos != - 1 ) { index = pos + 1 ; } } triggerList = ( TriggerDef [ ] ) ArrayUtil . toAdjustedArray ( triggerList , td , index , 1 ) ; TriggerDef [ ] list = triggerLists [ td . vectorIndex ] ; index = list . length ; if ( otherName != null ) { for ( int i = 0 ; i < list . length ; i ++ ) { TriggerDef trigger = list [ i ] ; if ( trigger . name . name . equals ( otherName . name ) ) { index = i + 1 ; break ; } } } list = ( TriggerDef [ ] ) ArrayUtil . toAdjustedArray ( list , td , index , 1 ) ; triggerLists [ td . vectorIndex ] = list ; } | Adds a trigger . |
30,774 | TriggerDef getTrigger ( String name ) { for ( int i = triggerList . length - 1 ; i >= 0 ; i -- ) { if ( triggerList [ i ] . name . name . equals ( name ) ) { return triggerList [ i ] ; } } return null ; } | Returns a trigger . |
30,775 | void removeTrigger ( String name ) { TriggerDef td = null ; for ( int i = 0 ; i < triggerList . length ; i ++ ) { td = triggerList [ i ] ; if ( td . name . name . equals ( name ) ) { td . terminate ( ) ; triggerList = ( TriggerDef [ ] ) ArrayUtil . toAdjustedArray ( triggerList , null , i , - 1 ) ; break ; } } if ( td == null ) { return ; } int index = td . vectorIndex ; for ( int j = 0 ; j < triggerLists [ index ] . length ; j ++ ) { td = triggerLists [ index ] [ j ] ; if ( td . name . name . equals ( name ) ) { td . terminate ( ) ; triggerLists [ index ] = ( TriggerDef [ ] ) ArrayUtil . toAdjustedArray ( triggerLists [ index ] , null , j , - 1 ) ; break ; } } } | Drops a trigger . |
30,776 | void releaseTriggers ( ) { for ( int i = 0 ; i < TriggerDef . NUM_TRIGS ; i ++ ) { for ( int j = 0 ; j < triggerLists [ i ] . length ; j ++ ) { triggerLists [ i ] [ j ] . terminate ( ) ; } triggerLists [ i ] = TriggerDef . emptyArray ; } } | Drops all triggers . |
30,777 | int getIndexIndex ( String indexName ) { Index [ ] indexes = indexList ; for ( int i = 0 ; i < indexes . length ; i ++ ) { if ( indexName . equals ( indexes [ i ] . getName ( ) . name ) ) { return i ; } } return - 1 ; } | Returns the index of the Index object of the given name or - 1 if not found . |
30,778 | Index getIndex ( String indexName ) { Index [ ] indexes = indexList ; int i = getIndexIndex ( indexName ) ; return i == - 1 ? null : indexes [ i ] ; } | Returns the Index object of the given name or null if not found . |
30,779 | int getConstraintIndex ( String constraintName ) { for ( int i = 0 , size = constraintList . length ; i < size ; i ++ ) { if ( constraintList [ i ] . getName ( ) . name . equals ( constraintName ) ) { return i ; } } return - 1 ; } | Return the position of the constraint within the list |
30,780 | public Constraint getConstraint ( String constraintName ) { int i = getConstraintIndex ( constraintName ) ; return ( i < 0 ) ? null : constraintList [ i ] ; } | Returns the named constraint .
30,781 | Index createIndexForColumns ( int [ ] columns ) { HsqlName indexName = database . nameManager . newAutoName ( "IDX_T" , getSchemaName ( ) , getName ( ) , SchemaObject . INDEX ) ; try { Index index = createAndAddIndexStructure ( indexName , columns , null , null , false , false , false , false ) ; return index ; } catch ( Throwable t ) { return null ; } } | Used to create an index automatically for system tables . |
30,782 | void enforceRowConstraints ( Session session , Object [ ] data ) { for ( int i = 0 ; i < defaultColumnMap . length ; i ++ ) { Type type = colTypes [ i ] ; data [ i ] = type . convertToTypeLimits ( session , data [ i ] ) ; if ( type . isDomainType ( ) ) { Constraint [ ] constraints = type . userTypeModifier . getConstraints ( ) ; for ( int j = 0 ; j < constraints . length ; j ++ ) { constraints [ j ] . checkCheckConstraint ( session , this , data [ i ] ) ; } } if ( data [ i ] == null ) { if ( colNotNull [ i ] ) { Constraint c = getNotNullConstraintForColumn ( i ) ; if ( c == null ) { if ( getColumn ( i ) . isPrimaryKey ( ) ) { c = this . getPrimaryConstraint ( ) ; } } String [ ] info = new String [ ] { c . getName ( ) . name , tableName . name } ; throw Error . error ( ErrorCode . X_23503 , ErrorCode . CONSTRAINT , info ) ; } } } } | Enforce max field sizes according to SQL column definition . SQL92 13 . 8 |
30,783 | Index getIndexForColumns ( int [ ] cols ) { int i = bestIndexForColumn [ cols [ 0 ] ] ; if ( i > - 1 ) { return indexList [ i ] ; } switch ( tableType ) { case TableBase . SYSTEM_SUBQUERY : case TableBase . SYSTEM_TABLE : case TableBase . VIEW_TABLE : case TableBase . TEMP_TABLE : { Index index = createIndexForColumns ( cols ) ; return index ; } } return null ; } | Finds an existing index for a column group |
30,784 | Index getIndexForColumns ( OrderedIntHashSet set ) { int maxMatchCount = 0 ; Index selected = null ; if ( set . isEmpty ( ) ) { return null ; } for ( int i = 0 , count = indexList . length ; i < count ; i ++ ) { Index currentindex = getIndex ( i ) ; int [ ] indexcols = currentindex . getColumns ( ) ; int matchCount = set . getOrderedMatchCount ( indexcols ) ; if ( matchCount == 0 ) { continue ; } if ( matchCount == indexcols . length ) { return currentindex ; } if ( matchCount > maxMatchCount ) { maxMatchCount = matchCount ; selected = currentindex ; } } if ( selected != null ) { return selected ; } switch ( tableType ) { case TableBase . SYSTEM_SUBQUERY : case TableBase . SYSTEM_TABLE : case TableBase . VIEW_TABLE : case TableBase . TEMP_TABLE : { selected = createIndexForColumns ( set . toArray ( ) ) ; } } return selected ; } | Finds an existing index for a column set or create one for temporary tables |
30,785 | public final int [ ] getIndexRootsArray ( ) { PersistentStore store = database . persistentStoreCollection . getStore ( this ) ; int [ ] roots = new int [ getIndexCount ( ) ] ; for ( int i = 0 ; i < getIndexCount ( ) ; i ++ ) { CachedObject accessor = store . getAccessor ( indexList [ i ] ) ; roots [ i ] = accessor == null ? - 1 : accessor . getPos ( ) ; } return roots ; } | Return the list of file pointers to root nodes for this table s indexes . |
30,786 | void setIndexRoots ( Session session , String s ) { if ( ! isCached ) { throw Error . error ( ErrorCode . X_42501 , tableName . name ) ; } ParserDQL p = new ParserDQL ( session , new Scanner ( s ) ) ; int [ ] roots = new int [ getIndexCount ( ) ] ; p . read ( ) ; for ( int i = 0 ; i < getIndexCount ( ) ; i ++ ) { int v = p . readInteger ( ) ; roots [ i ] = v ; } setIndexRoots ( roots ) ; } | Sets the index roots and next identity . |
30,787 | public void dropIndex ( Session session , String indexname ) { int todrop = getIndexIndex ( indexname ) ; indexList = ( Index [ ] ) ArrayUtil . toAdjustedArray ( indexList , null , todrop , - 1 ) ; for ( int i = 0 ; i < indexList . length ; i ++ ) { indexList [ i ] . setPosition ( i ) ; } setBestRowIdentifiers ( ) ; if ( store != null ) { store . resetAccessorKeys ( indexList ) ; } } | Performs Table structure modification and changes to the index nodes to remove a given index from a MEMORY or TEXT table . Not for PK index . |
30,788 | void insertRow ( Session session , PersistentStore store , Object [ ] data ) { setIdentityColumn ( session , data ) ; if ( triggerLists [ Trigger . INSERT_BEFORE ] . length != 0 ) { fireBeforeTriggers ( session , Trigger . INSERT_BEFORE , null , data , null ) ; } if ( isView ) { return ; } checkRowDataInsert ( session , data ) ; insertNoCheck ( session , store , data ) ; } | Mid level method for inserting rows . Performs constraint checks and fires row level triggers . |
30,789 | void insertIntoTable ( Session session , Result result ) { PersistentStore store = session . sessionData . getRowStore ( this ) ; RowSetNavigator nav = result . initialiseNavigator ( ) ; while ( nav . hasNext ( ) ) { Object [ ] data = nav . getNext ( ) ; Object [ ] newData = ( Object [ ] ) ArrayUtil . resizeArrayIfDifferent ( data , getColumnCount ( ) ) ; insertData ( store , newData ) ; } } | Multi - row insert method . Used for CREATE TABLE AS ... queries . |
30,790 | private Row insertNoCheck ( Session session , PersistentStore store , Object [ ] data ) { Row row = ( Row ) store . getNewCachedObject ( session , data ) ; store . indexRow ( session , row ) ; session . addInsertAction ( this , row ) ; return row ; } | Low level method for row insert . UNIQUE or PRIMARY constraints are enforced by attempting to add the row to the indexes . |
30,791 | public int insertSys ( PersistentStore store , Result ins ) { RowSetNavigator nav = ins . getNavigator ( ) ; int count = 0 ; while ( nav . hasNext ( ) ) { insertSys ( store , nav . getNext ( ) ) ; count ++ ; } return count ; } | Used for system table inserts . No checks . No identity columns . |
30,792 | void insertResult ( PersistentStore store , Result ins ) { RowSetNavigator nav = ins . initialiseNavigator ( ) ; while ( nav . hasNext ( ) ) { Object [ ] data = nav . getNext ( ) ; Object [ ] newData = ( Object [ ] ) ArrayUtil . resizeArrayIfDifferent ( data , getColumnCount ( ) ) ; insertData ( store , newData ) ; } } | Used for subquery inserts . No checks . No identity columns . |
30,793 | public void insertFromScript ( PersistentStore store , Object [ ] data ) { systemUpdateIdentityValue ( data ) ; insertData ( store , data ) ; } | Not for general use . Used by ScriptReader to unconditionally insert a row into the table when the . script file is read . |
30,794 | protected void systemUpdateIdentityValue ( Object [ ] data ) { if ( identityColumn != - 1 ) { Number id = ( Number ) data [ identityColumn ] ; if ( id != null ) { identitySequence . systemUpdate ( id . longValue ( ) ) ; } } } | If there is an identity column in the table sets the max identity value . |
30,795 | void deleteNoRefCheck ( Session session , Row row ) { Object [ ] data = row . getData ( ) ; fireBeforeTriggers ( session , Trigger . DELETE_BEFORE , data , null , null ) ; if ( isView ) { return ; } deleteNoCheck ( session , row ) ; } | Mid level row delete method . Fires triggers but no integrity constraint checks . |
30,796 | private void deleteNoCheck ( Session session , Row row ) { if ( row . isDeleted ( session ) ) { return ; } session . addDeleteAction ( this , row ) ; } | Low level row delete method . Removes the row from the indexes and from the Cache . |
30,797 | public void deleteNoCheckFromLog ( Session session , Object [ ] data ) { Row row = null ; PersistentStore store = session . sessionData . getRowStore ( this ) ; if ( hasPrimaryKey ( ) ) { RowIterator it = getPrimaryIndex ( ) . findFirstRow ( session , store , data , primaryKeyColsSequence ) ; row = it . getNextRow ( ) ; } else if ( bestIndex == null ) { RowIterator it = rowIterator ( session ) ; while ( true ) { row = it . getNextRow ( ) ; if ( row == null ) { break ; } if ( IndexAVL . compareRows ( row . getData ( ) , data , defaultColumnMap , colTypes ) == 0 ) { break ; } } } else { RowIterator it = bestIndex . findFirstRow ( session , store , data ) ; while ( true ) { row = it . getNextRow ( ) ; if ( row == null ) { break ; } Object [ ] rowdata = row . getData ( ) ; if ( bestIndex . compareRowNonUnique ( data , bestIndex . getColumns ( ) , rowdata ) != 0 ) { row = null ; break ; } if ( IndexAVL . compareRows ( rowdata , data , defaultColumnMap , colTypes ) == 0 ) { break ; } } } if ( row == null ) { return ; } deleteNoCheck ( session , row ) ; } | For log statements . Delete a single row . |
30,798 | public void addTTL ( int ttlValue , String ttlUnit , String ttlColumn , int batchSize , int maxFrequency , String streamName ) { dropTTL ( ) ; timeToLive = new TimeToLiveVoltDB ( ttlValue , ttlUnit , getColumn ( findColumn ( ttlColumn ) ) , batchSize , maxFrequency , streamName ) ; } | A VoltDB extension to support TTL |
30,799 | static public int getStart ( int field ) { Integer iObject = ( Integer ) starts . get ( new Integer ( field ) ) ; if ( iObject == null ) { throw new IllegalArgumentException ( RB . singleton . getString ( RB . UNEXPECTED_HEADER_KEY , field ) ) ; } return iObject . intValue ( ) ; } | Returns the start offset of the given header field , throwing IllegalArgumentException for an unknown field .
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.