idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
31,700
/**
 * Retrieves the value of an argument as a long.
 *
 * @param name the argument name to look up
 * @return the parsed value; -1 is reached only if printErrorAndQuit returns
 */
public long longValue(String name) {
    try {
        // Fix: Long.parseLong avoids the needless boxing of Long.valueOf.
        return Long.parseLong(this.getArgumentByName(name).Value);
    } catch (NullPointerException npe) {
        // getArgumentByName(name) returned null: the argument was never supplied.
        printErrorAndQuit(String.format("Argument '%s' was not provided.", name));
    } catch (NumberFormatException x) {
        // Fix: narrowed from catch (Exception) — parse failure is the only cast error here.
        printErrorAndQuit(String.format("Argument '%s' could not be cast to type: 'long'.", name));
    }
    return -1;
}
Retrieves the value of an argument as a long .
31,701
/**
 * Retrieves the value of an argument as a double.
 *
 * @param name the argument name to look up
 * @return the parsed value; -1 is reached only if printErrorAndQuit returns
 */
public double doubleValue(String name) {
    try {
        // Fix: Double.parseDouble avoids the needless boxing of Double.valueOf.
        return Double.parseDouble(this.getArgumentByName(name).Value);
    } catch (NullPointerException npe) {
        // getArgumentByName(name) returned null: the argument was never supplied.
        printErrorAndQuit(String.format("Argument '%s' was not provided.", name));
    } catch (NumberFormatException x) {
        // Fix: narrowed from catch (Exception) — parse failure is the only cast error here.
        printErrorAndQuit(String.format("Argument '%s' could not be cast to type: 'double'.", name));
    }
    return -1;
}
Retrieves the value of an argument as a double .
31,702
/**
 * Retrieves the value of an argument as a string.
 *
 * @param name the argument name to look up
 * @return the raw value; null only if printErrorAndQuit returns
 */
public String stringValue(String name) {
    try {
        return this.getArgumentByName(name).Value;
    } catch (Exception missing) {
        printErrorAndQuit(String.format("Argument '%s' was not provided.", name));
        return null;
    }
}
Retrieves the value of an argument as a string .
31,703
/**
 * Retrieves the value of an argument as a boolean.
 *
 * @param name the argument name to look up
 * @return the parsed value; false when the error path is taken and printErrorAndQuit returns
 */
public boolean booleanValue(String name) {
    boolean parsed = false;
    try {
        parsed = Boolean.valueOf(this.getArgumentByName(name).Value);
    } catch (NullPointerException missing) {
        printErrorAndQuit(String.format("Argument '%s' was not provided.", name));
    } catch (Exception badValue) {
        printErrorAndQuit(String.format("Argument '%s' could not be cast to type: 'boolean'.", name));
    }
    return parsed;
}
Retrieves the value of an argument as a boolean .
31,704
/**
 * Lays out the component and its two scroll bars: the horizontal bar occupies the
 * bottom strip, the vertical bar the right strip, and the remaining area is the
 * content region. Recomputes scroll state and drops the cached image before repainting.
 */
public void setBounds(int x, int y, int w, int h) {
    super.setBounds(x, y, w, h);
    iSbHeight = sbHoriz.getPreferredSize().height;
    iSbWidth = sbVert.getPreferredSize().width;
    // Content area is the full bounds minus the scroll-bar strips.
    iHeight = h - iSbHeight;
    iWidth = w - iSbWidth;
    sbHoriz.setBounds(0, iHeight, iWidth, iSbHeight);
    sbVert.setBounds(iWidth, 0, iSbWidth, iHeight);
    adjustScroll();
    iImage = null; // invalidate cached back-buffer so it is rebuilt at the new size
    repaint();
}
with additional replacement of deprecated methods
31,705
/**
 * Write the events in the queue to file.
 *
 * Offers the dump to the writer thread via m_work and blocks until that write
 * completes, so callers can rely on the file being fully flushed on return.
 *
 * @param logDir directory for the new trace file
 * @return the absolute path of the file written, or null when dumpEvents produced no
 *         write future (presumably nothing to dump — confirm against dumpEvents)
 */
private String write(String logDir) throws IOException, ExecutionException, InterruptedException {
    final File file = new File(logDir, "trace_" + System.currentTimeMillis() + ".json.gz");
    if (file.exists()) {
        throw new IOException("Trace file " + file.getAbsolutePath() + " already exists");
    }
    if (!file.getParentFile().canWrite() || !file.getParentFile().canExecute()) {
        throw new IOException("Trace file " + file.getAbsolutePath() + " is not writable");
    }
    // f is resolved by the writer thread with the future of the actual dump.
    SettableFuture<Future<?>> f = SettableFuture.create();
    m_work.offer(() -> f.set(dumpEvents(file)));
    final Future<?> writeFuture = f.get();
    if (writeFuture != null) {
        writeFuture.get(); // block until the dump itself finishes
        return file.getAbsolutePath();
    } else {
        return null;
    }
}
Write the events in the queue to file .
31,706
/**
 * Create a trace event batch for the given category. The events that go into this
 * batch should all originate from the same thread.
 *
 * @return the queued batch, or null when tracing is off or the category is disabled
 */
public static TraceEventBatch log(Category cat) {
    final VoltTrace tracer = s_tracer; // local snapshot: s_tracer may be nulled concurrently
    if (tracer == null || !tracer.isCategoryEnabled(cat)) {
        return null;
    }
    final TraceEventBatch batch = new TraceEventBatch(cat);
    tracer.queueEvent(batch);
    return batch;
}
Create a trace event batch for the given category . The events that go into this batch should all originate from the same thread .
31,707
/**
 * Close all open files and wait for shutdown.
 *
 * @param logDir if non-null, dump queued events there before shutting down
 * @param timeOutMillis maximum wait for the writer thread; negative skips the wait
 * @return the path of the dumped file, or null if nothing was dumped
 */
public static String closeAllAndShutdown(String logDir, long timeOutMillis) throws IOException {
    String path = null;
    final VoltTrace tracer = s_tracer;
    if (tracer != null) {
        if (logDir != null) {
            path = dump(logDir);
        }
        s_tracer = null; // detach first so no new events are queued
        if (timeOutMillis >= 0) {
            try {
                tracer.m_writerThread.shutdownNow();
                tracer.m_writerThread.awaitTermination(timeOutMillis, TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                // Fix: restore the interrupt status instead of silently swallowing it,
                // so the caller can still observe that this thread was interrupted.
                Thread.currentThread().interrupt();
            }
        }
        tracer.shutdown();
    }
    return path;
}
Close all open files and wait for shutdown .
31,708
/**
 * Creates and starts a new tracer. If one already exists this is a no-op.
 * Synchronized to prevent multiple threads enabling it at the same time.
 */
private static synchronized void start() throws IOException {
    if (s_tracer != null) {
        return; // already running
    }
    final VoltTrace newTracer = new VoltTrace();
    final Thread writer = new Thread(newTracer);
    writer.setDaemon(true); // must not keep the JVM alive on exit
    writer.start();
    s_tracer = newTracer;
}
Creates and starts a new tracer . If one already exists this is a no - op . Synchronized to prevent multiple threads enabling it at the same time .
31,709
/**
 * Write all trace events in the queue to a file under logDir, creating the
 * directory if needed.
 *
 * @param logDir target directory for the trace file
 * @return the path of the written file, or null if tracing is off or the write failed
 */
public static String dump(String logDir) throws IOException {
    String path = null;
    final VoltTrace tracer = s_tracer;
    if (tracer != null) {
        final File dir = new File(logDir);
        // Fix: for a relative path such as "trace", File.getParentFile() returns null and
        // the original code threw NullPointerException. Resolve to the absolute file first
        // so the parent directory is always available for the permission check.
        final File parent = dir.getAbsoluteFile().getParentFile();
        if (!parent.canWrite() || !parent.canExecute()) {
            throw new IOException("Trace log parent directory " + parent.getAbsolutePath() + " is not writable");
        }
        if (!dir.exists()) {
            if (!dir.mkdir()) {
                throw new IOException("Failed to create trace log directory " + dir.getAbsolutePath());
            }
        }
        try {
            path = tracer.write(logDir);
        } catch (Exception e) {
            // Best-effort: tracing must never take the server down.
            s_logger.info("Unable to write trace file: " + e.getMessage(), e);
        }
    }
    return path;
}
Write all trace events in the queue to file .
31,710
/**
 * Enable the given categories. If the tracer is not running at the moment, create
 * a new one.
 *
 * NOTE(review): the read-modify-write of m_enabledCategories is not synchronized
 * with disableCategories; concurrent calls could lose an update — confirm callers
 * serialize these operations.
 */
public static void enableCategories(Category... categories) throws IOException {
    if (s_tracer == null) {
        start(); // synchronized; no-op if another thread won the race
    }
    final VoltTrace tracer = s_tracer;
    assert tracer != null;
    // Rebuild the immutable set as old categories plus the new ones.
    final ImmutableSet.Builder<Category> builder = ImmutableSet.builder();
    builder.addAll(tracer.m_enabledCategories);
    builder.addAll(Arrays.asList(categories));
    tracer.m_enabledCategories = builder.build();
}
Enable the given categories . If the tracer is not running at the moment create a new one .
31,711
/**
 * Disable the given categories. If the tracer has no enabled category after this
 * call, shut the tracer down entirely.
 */
public static void disableCategories(Category... categories) {
    final VoltTrace tracer = s_tracer;
    if (tracer == null) {
        return; // tracing is not running
    }
    final List<Category> toDisable = Arrays.asList(categories);
    // Rebuild the immutable set keeping only categories not being disabled.
    final ImmutableSet.Builder<Category> builder = ImmutableSet.builder();
    for (Category enabledCategory : tracer.m_enabledCategories) {
        if (!toDisable.contains(enabledCategory)) {
            builder.add(enabledCategory);
        }
    }
    final ImmutableSet<Category> enabledCategories = builder.build();
    if (enabledCategories.isEmpty()) {
        try {
            // Nothing left enabled: tear down without dumping (null logDir) or waiting.
            closeAllAndShutdown(null, 0);
        } catch (IOException e) {
            // Deliberately ignored: shutdown here is best-effort.
        }
    } else {
        tracer.m_enabledCategories = enabledCategories;
    }
}
Disable the given categories . If the tracer has no enabled category after this call shutdown the tracer .
31,712
/**
 * Retrieves a thread instance for running the specified Runnable, delegating to the
 * installed factory implementation when one is set.
 */
public Thread newThread(Runnable r) {
    if (factory == this) {
        return new Thread(r); // no delegate installed: plain construction
    }
    return factory.newThread(r);
}
Retrieves a thread instance for running the specified Runnable.
31,713
/**
 * Sets the factory implementation that this factory will use to produce threads.
 * If f is null, this factory uses itself as the implementation.
 *
 * @return the previously installed factory
 */
public synchronized ThreadFactory setImpl(ThreadFactory f) {
    final ThreadFactory previous = factory;
    factory = (f == null) ? this : f;
    return previous;
}
Sets the factory implementation that this factory will use to produce threads . If the specified argument f is null then this factory uses itself as the implementation .
31,714
/**
 * A way to get a column value in raw byte form without doing any expensive
 * conversions like date processing or string encoding. The buffer position is
 * restored before returning on every path.
 */
final byte[] getRaw(int columnIndex) {
    byte[] retval;
    int pos = m_buffer.position(); // saved so the buffer position can be restored
    int offset = getOffset(columnIndex);
    VoltType type = getColumnType(columnIndex);
    switch (type) {
        case TINYINT:
        case SMALLINT:
        case INTEGER:
        case BIGINT:
        case TIMESTAMP:
        case FLOAT:
        case DECIMAL:
        case GEOGRAPHY_POINT: {
            // Fixed-width types: copy exactly the type's declared byte length.
            int length = type.getLengthInBytesForFixedTypesWithoutCheck();
            retval = new byte[length];
            m_buffer.position(offset);
            m_buffer.get(retval);
            m_buffer.position(pos);
            return retval;
        }
        case STRING:
        case VARBINARY:
        case GEOGRAPHY: {
            // Variable-width types carry a 4-byte length prefix before the payload.
            int length = m_buffer.getInt(offset);
            if (length == VoltTable.NULL_STRING_INDICATOR) {
                length = 0; // NULL value: only the prefix is copied
            }
            length += 4; // include the length prefix itself in the raw bytes
            retval = new byte[length];
            m_buffer.position(offset);
            m_buffer.get(retval);
            m_buffer.position(pos);
            return retval;
        }
        default:
            throw new RuntimeException("Unknown type");
    }
}
A way to get a column value in raw byte form without doing any expensive conversions like date processing or string encoding .
31,715
/**
 * Validates that the row is positioned, that columnIndex is in range, and that the
 * column's declared type is one of the given types.
 *
 * @throws RuntimeException when the row has not been advanced
 * @throws IndexOutOfBoundsException when columnIndex is outside [0, columnCount)
 * @throws IllegalArgumentException when the column type matches none of types
 */
final void validateColumnType(int columnIndex, VoltType... types) {
    if (m_position < 0) {
        throw new RuntimeException("VoltTableRow is in an invalid state. Consider calling advanceRow().");
    }
    if ((columnIndex >= getColumnCount()) || (columnIndex < 0)) {
        // Fix: the old message claimed the index was "greater than the number of columns"
        // even when it was negative; report the actual valid range instead.
        throw new IndexOutOfBoundsException(
                "Column index " + columnIndex + " is out of range [0, " + getColumnCount() + ")");
    }
    final VoltType columnType = getColumnType(columnIndex);
    for (VoltType type : types) {
        if (columnType == type) {
            return;
        }
    }
    throw new IllegalArgumentException("Column index " + columnIndex + " is type " + columnType);
}
Validates that type and columnIndex match and are valid .
31,716
/**
 * Reads a string from the backing buffer at an absolute position with a specific
 * encoding. Returns null for the NULL indicator. The buffer position is restored
 * before returning.
 *
 * @param position absolute offset of the 4-byte length prefix
 * @param encoding charset used to decode the raw bytes
 */
final String readString(int position, Charset encoding) {
    // Bounds check: the 4-byte length prefix itself must fit in the buffer.
    if (STRING_LEN_SIZE > m_buffer.limit() - position) {
        throw new RuntimeException(String.format(
                "VoltTableRow::readString: Can't read string size as %d byte integer " +
                "from buffer with %d bytes remaining.",
                STRING_LEN_SIZE, m_buffer.limit() - position));
    }
    final int len = m_buffer.getInt(position);
    if (len == VoltTable.NULL_STRING_INDICATOR) return null;
    if (len < 0) {
        throw new RuntimeException("Invalid object length.");
    }
    // Bounds check: the payload must also fit.
    if (position + STRING_LEN_SIZE + len > m_buffer.limit()) {
        throw new RuntimeException(String.format(
                "VoltTableRow::readString: Can't read %d byte string " +
                "from buffer with %d bytes remaining.",
                len, m_buffer.limit() - position - STRING_LEN_SIZE));
    }
    byte[] stringData = new byte[len];
    int oldPos = m_buffer.position(); // save so the position can be restored below
    m_buffer.position(position + STRING_LEN_SIZE);
    m_buffer.get(stringData);
    m_buffer.position(oldPos);
    return new String(stringData, encoding);
}
Reads a string from a buffer with a specific encoding .
31,717
/**
 * Appends a task message to the buffer as [int size][long sourceHSId][payload].
 *
 * @return 0 when the buffer filled up and was compiled, otherwise the bytes still
 *         available for another task (including its header)
 * @throws IllegalStateException via Preconditions when the buffer is already compiled
 */
public int appendTask(long sourceHSId, TransactionInfoBaseMessage task) throws IOException {
    Preconditions.checkState(compiledSize == 0, "buffer is already compiled");
    final int msgSerializedSize = task.getSerializedSize();
    ensureCapacity(taskHeaderSize() + msgSerializedSize);
    ByteBuffer bb = m_container.b();
    bb.putInt(msgSerializedSize);
    bb.putLong(sourceHSId);
    // Flatten into a slice bounded to exactly msgSerializedSize bytes, then restore
    // the limit and advance past the payload manually.
    int limit = bb.limit();
    bb.limit(bb.position() + msgSerializedSize);
    task.flattenToBuffer(bb.slice());
    bb.limit(limit);
    bb.position(bb.position() + msgSerializedSize);
    if (bb.position() + taskHeaderSize() > DEFAULT_BUFFER_SIZE) {
        // Not enough room for even one more header: seal the buffer now.
        compile();
        return 0;
    } else {
        return DEFAULT_BUFFER_SIZE - (bb.position() + taskHeaderSize());
    }
}
Appends a task message to the buffer .
31,718
/**
 * Get the next task message in this buffer, or null when no entries remain.
 * Entries are laid out as [int size][long sourceHSId][payload] (see appendTask).
 */
public TransactionInfoBaseMessage nextTask() throws IOException {
    if (!hasMoreEntries()) {
        return null;
    }
    ByteBuffer bb = m_container.b();
    int position = bb.position();
    int length = bb.getInt();
    long sourceHSId = bb.getLong();
    VoltDbMessageFactory factory = new VoltDbMessageFactory();
    // Deserialize from a slice bounded to the payload, then restore the limit.
    final int oldLimit = bb.limit();
    bb.limit(bb.position() + length);
    ByteBuffer slice = bb.slice();
    bb.limit(oldLimit);
    VoltMessage msg = factory.createMessageFromBuffer(slice, sourceHSId);
    // Skip the whole entry: payload + 8-byte HSId + 4-byte length header.
    bb.position(position + length + 8 + 4);
    return (TransactionInfoBaseMessage) msg;
}
Get the next task message in this buffer .
31,719
/**
 * Generate the byte array in preparation of moving over a message bus.
 * Idempotent but not thread-safe. Also changes state to immutable (a non-zero
 * compiledSize marks the buffer as sealed — see appendTask's precondition).
 */
public void compile() {
    if (compiledSize == 0) {
        ByteBuffer bb = m_container.b();
        compiledSize = bb.position();
        bb.flip(); // switch from writing to reading
        m_allocator.track(compiledSize);
    }
    if (log.isTraceEnabled()) {
        // Dump every byte of the compiled buffer via a read-only duplicate.
        StringBuilder sb = new StringBuilder("Compiling buffer: ");
        ByteBuffer dup = m_container.bDR();
        while (dup.hasRemaining()) {
            sb.append(" ").append(dup.get());
        }
        log.trace(sb.toString());
    }
}
Generate the byte array in preparation of moving over a message bus . Idempotent but not thread - safe . Also changes state to immutable .
31,720
/**
 * Update the catalog context and evict idle sites whose catalog no longer matches
 * (stale CRC or version); matching sites are kept for reuse.
 */
void updateCatalog(String diffCmds, CatalogContext context) {
    if (m_shuttingDown) {
        return;
    }
    m_catalogContext = context;
    Iterator<MpRoSiteContext> siterator = m_idleSites.iterator();
    while (siterator.hasNext()) {
        MpRoSiteContext site = siterator.next();
        if (site.getCatalogCRC() != m_catalogContext.getCatalogCRC()
                || site.getCatalogVersion() != m_catalogContext.catalogVersion) {
            site.shutdown();
            // Fix: remove through the iterator. Calling m_idleSites.remove(site) while
            // iterating risks ConcurrentModificationException on fail-fast collections.
            siterator.remove();
            m_allSites.remove(site);
        }
    }
}
Update the catalog
31,721
/**
 * Attempt to start the transaction represented by the given task, reusing the site
 * already bound to txnId or taking (possibly creating) an idle site.
 *
 * @return false when the pool cannot accept work right now
 */
boolean doWork(long txnId, TransactionTask task) {
    if (!canAcceptWork()) {
        return false;
    }
    // Single map lookup replaces the containsKey()+get() pair; busy sites are never
    // mapped to null (completeWork throws on a missing entry), so null means absent.
    MpRoSiteContext site = m_busySites.get(txnId);
    if (site == null) {
        if (m_idleSites.isEmpty()) {
            // Grow the pool on demand.
            MpRoSiteContext newSite = new MpRoSiteContext(m_siteId, m_backend, m_catalogContext,
                    m_partitionId, m_initiatorMailbox, m_poolThreadFactory);
            m_idleSites.push(newSite);
            m_allSites.add(newSite);
        }
        site = m_idleSites.pop();
        m_busySites.put(txnId, site);
    }
    site.offer(task);
    return true;
}
Attempt to start the transaction represented by the given task . Need the txn ID for future reference .
31,722
/**
 * Inform the pool that the work associated with the given txnId is complete. The
 * site is returned to the idle pool only if its catalog is still current;
 * otherwise it is shut down and dropped.
 *
 * @throws RuntimeException when no busy site is bound to txnId
 */
void completeWork(long txnId) {
    if (m_shuttingDown) {
        return;
    }
    MpRoSiteContext site = m_busySites.remove(txnId);
    if (site == null) {
        throw new RuntimeException("No busy site for txnID: " + txnId + " found, shouldn't happen.");
    }
    if (site.getCatalogCRC() == m_catalogContext.getCatalogCRC()
            && site.getCatalogVersion() == m_catalogContext.catalogVersion) {
        m_idleSites.push(site); // still current: recycle
    } else {
        // Catalog changed while this site was busy: retire it.
        site.shutdown();
        m_allSites.remove(site);
    }
}
Inform the pool that the work associated with the given txnID is complete
31,723
/**
 * System procedure entry point; there are no user-specified parameters.
 *
 * NOTE(review): exceptions are printed and swallowed, returning null to the caller —
 * confirm this matches the sysproc framework's expectations.
 */
public VoltTable[] run(SystemProcedureExecutionContext ctx) {
    VoltTable[] result = null;
    try {
        result = createAndExecuteSysProcPlan(SysProcFragmentId.PF_quiesce_sites,
                SysProcFragmentId.PF_quiesce_processed_sites);
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    return result;
}
There are no user specified parameters .
31,724
/**
 * Write a file to disk during compilation that has some neato info generated during
 * compilation. If the debug flag is true, the file is only written when the compiler
 * is running in debug mode.
 *
 * @param dir optional sub-directory under the debug/user root; may be null
 * @param filename name of the file to create
 * @param content text to write
 * @param debug when true, write only in VoltCompiler.DEBUG_MODE and under the debug root
 */
public static void writeFile(final String dir, final String filename, String content, boolean debug) {
    if (debug && !VoltCompiler.DEBUG_MODE) {
        return;
    }
    // Lazily resolve the two output roots, honoring TEST_DIR when set.
    if (m_debugRoot == null) {
        if (System.getenv("TEST_DIR") != null) {
            m_debugRoot = System.getenv("TEST_DIR") + File.separator + debugRootPrefix;
        } else {
            m_debugRoot = debugRootPrefix;
        }
    }
    if (m_userRoot == null) {
        if (System.getenv("TEST_DIR") != null) {
            m_userRoot = System.getenv("TEST_DIR") + File.separator + userRootPrefix;
        } else {
            m_userRoot = userRootPrefix;
        }
    }
    String root = debug ? m_debugRoot : m_userRoot;
    String subFolderPath = root;
    if (dir != null) {
        subFolderPath += File.separator + dir;
    }
    // Create each output directory at most once per process.
    if (!m_seenPaths.contains(subFolderPath)) {
        File f = new File(subFolderPath);
        f.mkdirs();
        m_seenPaths.add(subFolderPath);
    }
    String filepath = subFolderPath + File.separator + filename;
    File f = new File(filepath);
    // Fix: try-with-resources guarantees the stream is closed even if println fails,
    // instead of relying on the straight-line close() of the original.
    try (PrintStream streamOut = new PrintStream(f)) {
        streamOut.println(content);
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    }
}
Write a file to disk during compilation that has some neato info generated during compilation . If the debug flag is true that means this file should only be written if the compiler is running in debug mode .
31,725
/**
 * Collect all JOIN and WHERE expressions, combined with AND, for the entire tree.
 */
public AbstractExpression getAllFilters() {
    ArrayDeque<JoinNode> pending = new ArrayDeque<>();
    ArrayDeque<AbstractExpression> work = new ArrayDeque<>();
    ArrayDeque<AbstractExpression> leaves = new ArrayDeque<>();
    pending.add(this);
    // Phase 1: gather every join/where expression from every node in the tree.
    for (JoinNode node = pending.poll(); node != null; node = pending.poll()) {
        if (node.m_joinExpr != null) {
            work.add(node.m_joinExpr);
        }
        if (node.m_whereExpr != null) {
            work.add(node.m_whereExpr);
        }
        node.queueChildren(pending);
    }
    // Phase 2: flatten AND conjunctions down to their operand leaves.
    for (AbstractExpression expr = work.poll(); expr != null; expr = work.poll()) {
        if (expr.getExpressionType() == ExpressionType.CONJUNCTION_AND) {
            work.add(expr.getLeft());
            work.add(expr.getRight());
        } else {
            leaves.add(expr);
        }
    }
    return ExpressionUtil.combinePredicates(leaves);
}
Collect all JOIN and WHERE expressions combined with AND for the entire tree .
31,726
/**
 * Get the filter expression for a single-table statement: the AND of WHERE and JOIN
 * when both exist, otherwise whichever one is present (possibly null).
 */
public AbstractExpression getSimpleFilterExpression() {
    if (m_whereExpr == null) {
        return m_joinExpr;
    }
    return (m_joinExpr == null) ? m_whereExpr : ExpressionUtil.combine(m_whereExpr, m_joinExpr);
}
Get the WHERE expression for a single - table statement .
31,727
/**
 * Returns every node of the tree in the order they are joined, by iterating the
 * tree depth-first.
 */
public List<JoinNode> generateAllNodesJoinOrder() {
    final ArrayList<JoinNode> ordered = new ArrayList<>();
    listNodesJoinOrderRecursive(ordered, true);
    return ordered;
}
Returns nodes in the order they are joined in the tree by iterating the tree depth - first
31,728
/**
 * Split a join tree into one or more sub-trees, each with a uniform join type.
 * The root of a child tree is replaced in its parent by a dummy node whose id is
 * the negated id of the child root node. The result starts with this tree followed
 * by the recursively extracted sub-trees.
 */
public List<JoinNode> extractSubTrees() {
    List<JoinNode> subTrees = new ArrayList<>();
    subTrees.add(this); // this tree is always the first entry
    // extractSubTree prunes child sub-trees off and returns their roots as leaves.
    List<JoinNode> leafNodes = new ArrayList<>();
    extractSubTree(leafNodes);
    // Recurse into each pruned sub-tree.
    for (JoinNode leaf : leafNodes) {
        subTrees.addAll(leaf.extractSubTrees());
    }
    return subTrees;
}
Split a join tree into one or more sub - trees . Each sub - tree has the same join type for all join nodes . The root of the child tree in the parent tree is replaced with a dummy node which id is negated id of the child root node .
31,729
/**
 * Reconstruct a join tree from the list of tables, always appending the next node
 * on the right of a new branch node.
 */
public static JoinNode reconstructJoinTreeFromTableNodes(List<JoinNode> tableNodes, JoinType joinType) {
    JoinNode root = null;
    for (JoinNode leaf : tableNodes) {
        final JoinNode bare = leaf.cloneWithoutFilters();
        // The branch id is the negated id of its right child.
        root = (root == null) ? bare : new BranchNode(-bare.m_id, joinType, root, bare);
    }
    return root;
}
Reconstruct a join tree from the list of tables always appending the next node to the right .
31,730
/**
 * Reconstruct a join tree from sub-trees, reconnecting them in list order. The list
 * must have been produced by extractSubTrees on the original tree.
 */
public static JoinNode reconstructJoinTreeFromSubTrees(List<JoinNode> subTrees) {
    if (subTrees == null || subTrees.isEmpty()) {
        return null;
    }
    final JoinNode root = subTrees.get(0);
    for (int i = 1; i < subTrees.size(); ++i) {
        final boolean replaced = root.replaceChild(subTrees.get(i));
        // Every sub-tree must match a dummy placeholder somewhere under the root.
        assert (replaced);
    }
    return root;
}
Reconstruct a join tree from the list of sub - trees connecting the sub - trees in the order they appear in the list . The list of sub - trees must be initially obtained by calling the extractSubTrees method on the original tree .
31,731
/**
 * Apply implied transitive constant filters to join expressions:
 * outer.partkey = ? AND outer.partkey = inner.partkey implies inner.partkey = ?.
 */
protected static void applyTransitiveEquivalence(List<AbstractExpression> outerTableExprs,
        List<AbstractExpression> innerTableExprs,
        List<AbstractExpression> innerOuterTableExprs) {
    // Both derivations must run before either input list is mutated.
    final List<AbstractExpression> impliedOuter =
            applyTransitiveEquivalence(innerTableExprs, innerOuterTableExprs);
    final List<AbstractExpression> impliedInner =
            applyTransitiveEquivalence(outerTableExprs, innerOuterTableExprs);
    outerTableExprs.addAll(impliedOuter);
    innerTableExprs.addAll(impliedInner);
}
Apply implied transitive constant filter to join expressions outer . partkey = ? and outer . partkey = inner . partkey is equivalent to outer . partkey = ? and inner . partkey = ?
31,732
/**
 * Split the input expressions into four buckets: those referencing outer tables
 * only, inner tables only, both sides (inner-outer), or no tables at all. The
 * outer/inner tables are those reachable from the respective side of the join.
 */
protected static void classifyJoinExpressions(Collection<AbstractExpression> exprList,
        Collection<String> outerTables, Collection<String> innerTables,
        List<AbstractExpression> outerList, List<AbstractExpression> innerList,
        List<AbstractExpression> innerOuterList, List<AbstractExpression> noneList) {
    HashSet<String> tableAliasSet = new HashSet<>();
    HashSet<String> outerSet = new HashSet<>(outerTables);
    HashSet<String> innerSet = new HashSet<>(innerTables);
    for (AbstractExpression expr : exprList) {
        tableAliasSet.clear(); // reused across iterations
        getTablesForExpression(expr, tableAliasSet);
        String tableAliases[] = tableAliasSet.toArray(new String[0]);
        if (tableAliasSet.isEmpty()) {
            noneList.add(expr); // constant expression: references no table
        } else {
            boolean outer = false;
            boolean inner = false;
            for (String alias : tableAliases) {
                outer = outer || outerSet.contains(alias);
                inner = inner || innerSet.contains(alias);
            }
            if (outer && inner) {
                innerOuterList.add(expr);
            } else if (outer) {
                outerList.add(expr);
            } else if (inner) {
                innerList.add(expr);
            } else {
                // Unreachable: every referenced alias belongs to one of the two sides.
                assert (false);
            }
        }
    }
}
Split the input expression list into the three categories 1 . TVE expressions with outer tables only 2 . TVE expressions with inner tables only 3 . TVE expressions with inner and outer tables The outer tables are the tables reachable from the outer node of the join The inner tables are the tables reachable from the inner node of the join
31,733
/**
 * Produces a ParameterStateManager to pass to HSQL so that VoltDB can track the
 * parameters it created when parsing the current statement.
 */
public static HSQLInterface.ParameterStateManager getParamStateManager() {
    return new ParameterStateManager() {
        @Override
        public int getNextParamIndex() {
            return ParameterizationInfo.getNextParamIndex();
        }

        @Override
        public void resetCurrentParamIndex() {
            ParameterizationInfo.resetCurrentParamIndex();
        }
    };
}
This method produces a ParameterStateManager to pass to HSQL so that VoltDB can track the parameters it created when parsing the current statement .
31,734
/**
 * Decodes the next non-null column from the buffer according to its type.
 *
 * @throws IOException for an unrecognized column type
 */
private static Object decodeNextColumn(ByteBuffer bb, VoltType columnType) throws IOException {
    switch (columnType) {
        case TINYINT:         return decodeTinyInt(bb);
        case SMALLINT:        return decodeSmallInt(bb);
        case INTEGER:         return decodeInteger(bb);
        case BIGINT:          return decodeBigInt(bb);
        case FLOAT:           return decodeFloat(bb);
        case TIMESTAMP:       return decodeTimestamp(bb);
        case STRING:          return decodeString(bb);
        case VARBINARY:       return decodeVarbinary(bb);
        case DECIMAL:         return decodeDecimal(bb);
        case GEOGRAPHY_POINT: return decodeGeographyPoint(bb);
        case GEOGRAPHY:       return decodeGeography(bb);
        default:
            throw new IOException("Invalid column type: " + columnType);
    }
}
Decodes the next non-null column in the FastDeserializer.
31,735
/**
 * Read a decimal according to the Four Dot Four encoding specification:
 * a one-byte scale, a one-byte unscaled length, then the unscaled big-endian bytes.
 */
static public BigDecimal decodeDecimal(final ByteBuffer bb) {
    final int scale = bb.get();
    final int unscaledLength = bb.get();
    final byte[] unscaled = new byte[unscaledLength];
    bb.get(unscaled);
    return new BigDecimal(new BigInteger(unscaled), scale);
}
Read a decimal according to the Four Dot Four encoding specification .
31,736
/**
 * Read a varbinary according to the Export encoding specification:
 * a 4-byte length followed by that many payload bytes.
 */
static public Object decodeVarbinary(final ByteBuffer bb) {
    final int byteCount = bb.getInt();
    final byte[] payload = new byte[byteCount];
    bb.get(payload);
    return payload;
}
Read a varbinary according to the Export encoding specification
31,737
/**
 * Read a geography according to the Four Dot Four Export encoding specification.
 */
static public GeographyValue decodeGeography(final ByteBuffer bb) {
    final int declaredLength = bb.getInt();
    final int start = bb.position();
    final GeographyValue value = GeographyValue.unflattenFromBuffer(bb);
    // Sanity check (active only with -ea): unflatten consumed exactly the declared bytes.
    assert (bb.position() - start == declaredLength);
    return value;
}
Read a geography according to the Four Dot Four Export encoding specification .
31,738
/**
 * Returns the children of this node in left-to-right order, as a lazily evaluated
 * iterable (children are looked up only when the iterator is advanced).
 */
public final Iterable<T> children(final T root) {
    checkNotNull(root);
    return new FluentIterable<T>() {
        public Iterator<T> iterator() {
            return new AbstractIterator<T>() {
                boolean doneLeft;  // left child already offered
                boolean doneRight; // right child already offered

                protected T computeNext() {
                    if (!doneLeft) {
                        doneLeft = true;
                        Optional<T> left = leftChild(root);
                        if (left.isPresent()) {
                            return left.get();
                        }
                        // Absent left child: fall through to the right side.
                    }
                    if (!doneRight) {
                        doneRight = true;
                        Optional<T> right = rightChild(root);
                        if (right.isPresent()) {
                            return right.get();
                        }
                    }
                    return endOfData();
                }
            };
        }
    };
}
Returns the children of this node in left - to - right order .
31,739
/**
 * Retrieves the boolean value corresponding to the next token, consuming it.
 *
 * @throws the parser's standard error for any token other than TRUE or FALSE
 */
private boolean processTrueOrFalse() {
    final int type = token.tokenType;
    if (type != Tokens.TRUE && type != Tokens.FALSE) {
        throw unexpectedToken();
    }
    read(); // consume the boolean literal
    return type == Tokens.TRUE;
}
Retrieves boolean value corresponding to the next token .
31,740
/**
 * Return a table of the profile rows sorted by weighted average (avg * invocations),
 * descending — the comparator arguments are swapped to reverse compareByAvg's order.
 * The WEIGHTED_PERC column is each row's share of the total weighted time.
 */
public VoltTable sortByAverage(String tableName) {
    List<ProcProfRow> sorted = new ArrayList<ProcProfRow>(m_table);
    Collections.sort(sorted, new Comparator<ProcProfRow>() {
        public int compare(ProcProfRow lhs, ProcProfRow rhs) {
            // Reversed arguments: largest weighted average first.
            return compareByAvg(rhs, lhs);
        }
    });
    // Total weighted time, the denominator for each row's percentage.
    long sumOfAverage = 0L;
    for (ProcProfRow row : sorted) {
        sumOfAverage += (row.avg * row.invocations);
    }
    VoltTable result = TableShorthand.tableFromShorthand(tableName +
            "(TIMESTAMP:BIGINT, PROCEDURE:VARCHAR, WEIGHTED_PERC:BIGINT, INVOCATIONS:BIGINT," +
            "AVG:BIGINT, MIN:BIGINT, MAX:BIGINT, ABORTS:BIGINT, FAILURES:BIGINT)");
    for (ProcProfRow row : sorted) {
        result.addRow(row.timestamp, row.procedure,
                calculatePercent(row.avg * row.invocations, sumOfAverage),
                row.invocations, row.avg, row.min, row.max, row.aborts, row.failures);
    }
    return result;
}
Return table sorted by weighted avg
31,741
/**
 * Compare two rows by average execution time weighted by invocation count.
 *
 * @return negative/zero/positive as lhs's weighted total is less than / equal to /
 *         greater than rhs's
 */
public int compareByAvg(ProcProfRow lhs, ProcProfRow rhs) {
    // Long.compare replaces the hand-rolled three-way branch. NOTE(review): the
    // product avg * invocations can overflow a long for extreme values, exactly as
    // in the original — confirm inputs stay bounded.
    return Long.compare(lhs.avg * lhs.invocations, rhs.avg * rhs.invocations);
}
Sort by average weighting the sampled average by the real invocation count .
31,742
/**
 * Runs when the RejoinCoordinator decides this site should start rejoin. Sets up
 * the stream snapshot sink (only when the schema has persistent tables), computes
 * the source/destination HSIds, and replies to the coordinator with an INITIATION
 * response before queueing itself for execution.
 */
void doInitiation(RejoinMessage message) {
    m_coordinatorHsId = message.m_sourceHSId;
    m_hasPersistentTables = message.schemaHasPersistentTables();
    if (m_hasPersistentTables) {
        // Snapshot data will stream into this dedicated mailbox.
        m_streamSnapshotMb = VoltDB.instance().getHostMessenger().createMailbox();
        m_rejoinSiteProcessor = new StreamSnapshotSink(m_streamSnapshotMb);
        kickWatchdog(TimerCallback.initialTimer());
    } else {
        // No persistent tables: nothing to stream.
        m_streamSnapshotMb = null;
        m_rejoinSiteProcessor = null;
    }
    long sourceSite = m_mailbox.getMasterHsId(m_partitionId);
    // The lowest partition gets one source; every other partition gets two.
    boolean haveTwoSources = VoltDB.instance().getLowestPartitionId() != m_partitionId;
    long hsId = (m_rejoinSiteProcessor != null
            ? m_rejoinSiteProcessor.initialize(haveTwoSources ? 2 : 1,
                    message.getSnapshotDataBufferPool(),
                    message.getSnapshotCompressedDataBufferPool())
            : Long.MIN_VALUE); // sentinel when no snapshot processor exists
    REJOINLOG.debug(m_whoami + "received INITIATION message. Doing rejoin" +
            ". Source site is: " + CoreUtils.hsIdToString(sourceSite) +
            " and destination rejoin processor is: " + CoreUtils.hsIdToString(hsId) +
            " and snapshot nonce is: " + message.getSnapshotNonce());
    registerSnapshotMonitor(message.getSnapshotNonce());
    RejoinMessage initResp = new RejoinMessage(m_mailbox.getHSId(), sourceSite, hsId);
    m_mailbox.send(m_coordinatorHsId, initResp);
    m_taskQueue.offer(this);
}
Runs when the RejoinCoordinator decides this site should start rejoin .
31,743
/**
 * Called from outside after the complete end of defrag: re-points every cached
 * table at its relocated index roots. rootsList is indexed in parallel with the
 * schema manager's table list.
 */
void updateTableIndexRoots() {
    HsqlArrayList allTables = database.schemaManager.getAllTables();
    for (int i = 0, size = allTables.size(); i < size; i++) {
        Table t = (Table) allTables.get(i);
        // Only cached (disk-backed) tables have index roots moved by defrag.
        if (t.getTableType() == TableBase.CACHED_TABLE) {
            int[] rootsArray = rootsList[i];
            t.setIndexRoots(rootsArray);
        }
    }
}
called from outside after the complete end of defrag
31,744
/**
 * Send a command log replay sentinel to the single-partition master of the given
 * partition.
 */
public final void sendSentinel(long txnId, int partitionId) {
    final long masterHSId = m_cartographer.getHSIdForSinglePartitionMaster(partitionId);
    sendSentinel(txnId, masterHSId, -1, -1, true);
}
Send a command log replay sentinel to the given partition .
31,745
/**
 * Coward way out of the legacy hashinator hell. LoadSinglepartitionTable gets the
 * partitioning parameter as a byte array; the legacy hashinator hashes numbers and
 * byte arrays differently, so the parameter type is resolved from the catalog before
 * hashing.
 *
 * @return an error response on failure, or null when the transaction was created
 */
private final ClientResponseImpl dispatchLoadSinglepartitionTable(Procedure catProc,
        StoredProcedureInvocation task, InvocationClientHandler handler, Connection ccxn) {
    int partition = -1;
    try {
        CatalogMap<Table> tables = m_catalogContext.get().database.getTables();
        // Resolve the declared partition-column type so the hash matches the table's.
        int partitionParamType = getLoadSinglePartitionTablePartitionParamType(tables, task);
        byte[] valueToHash = (byte[]) task.getParameterAtIndex(0);
        partition = TheHashinator.getPartitionForParameter(partitionParamType, valueToHash);
    } catch (Exception e) {
        authLog.warn(e.getMessage());
        return new ClientResponseImpl(ClientResponseImpl.UNEXPECTED_FAILURE,
                new VoltTable[0], e.getMessage(), task.clientHandle);
    }
    assert (partition != -1);
    createTransaction(handler.connectionId(), task, catProc.getReadonly(),
            catProc.getSinglepartition(), catProc.getEverysite(),
            new int[] { partition }, task.getSerializedSize(), System.nanoTime());
    return null; // null means the transaction was dispatched successfully
}
Coward way out of the legacy hashinator hell . LoadSinglepartitionTable gets the partitioning parameter as a byte array . Legacy hashinator hashes numbers and byte arrays differently so have to convert it back to long if it's a number . UGLY!!!
31,746
/**
 * Passes an all-host NT procedure response on to its outstanding runner via
 * NTProcedureService; late responses (runner already gone) are logged and dropped.
 */
public void handleAllHostNTProcedureResponse(ClientResponseImpl clientResponseData) {
    final long handle = clientResponseData.getClientHandle();
    final ProcedureRunnerNT runner = m_NTProcedureService.m_outstanding.get(handle);
    if (runner == null) {
        hostLog.info("Run everywhere NTProcedure early returned, probably gets timed out.");
        return;
    }
    runner.allHostNTProcedureCallback(clientResponseData);
}
Passes responses to NTProcedureService
31,747
/**
 * Value comparison between one CVE and one PVE, in either order.
 */
private static boolean valueConstantsMatch(AbstractExpression e1, AbstractExpression e2) {
    final boolean mixedPair =
            (e1 instanceof ParameterValueExpression && e2 instanceof ConstantValueExpression)
            || (e1 instanceof ConstantValueExpression && e2 instanceof ParameterValueExpression);
    return mixedPair && equalsAsCVE(e1, e2);
}
Value comparison between one CVE and one PVE .
31,748
/**
 * Check whether two expressions (each either a CVE or PVE) have the same content
 * when viewed as constants. \pre both must be either CVE or PVE.
 */
private static boolean equalsAsCVE(AbstractExpression e1, AbstractExpression e2) {
    final ConstantValueExpression c1 = asCVE(e1);
    final ConstantValueExpression c2 = asCVE(e2);
    if (c1 == null || c2 == null) {
        return c1 == c2; // equal only when both resolve to null
    }
    return c1.equals(c2);
}
Check whether two expressions each either a CVE or PVE have same content . \ pre both must be either CVE or PVE .
31,749
/**
 * Convert a ConstantValueExpression or ParameterValueExpression into a
 * ConstantValueExpression. \pre the argument must be one of the two.
 */
private static ConstantValueExpression asCVE(AbstractExpression expr) {
    if (expr instanceof ConstantValueExpression) {
        return (ConstantValueExpression) expr;
    }
    // Anything else must be a PVE; a ClassCastException signals a contract violation.
    return ((ParameterValueExpression) expr).getOriginalValue();
}
Convert a ConstantValueExpression or ParameterValueExpression into a ConstantValueExpression . \ pre argument must be either of the two .
31,750
/**
 * Wraps the given expression in a new ParameterValueExpression (so the original
 * expression value is still reachable), records its parameter index, and appends
 * the wrapper to pves.
 */
protected void addCorrelationParameterValueExpression(AbstractExpression expr,
        List<AbstractExpression> pves) {
    final int nextIndex = ParameterizationInfo.getNextParamIndex();
    m_parameterIdxList.add(nextIndex);
    pves.add(new ParameterValueExpression(nextIndex, expr));
}
to get the original expression value
31,751
/**
 * Returns a row if it is in the memory cache, or null. Bumps the row's access
 * stamp for the LRU-style eviction bookkeeping.
 */
public synchronized CachedObject get(int pos) {
    // Avoid wrap-around of the monotonically increasing access counter.
    if (accessCount == Integer.MAX_VALUE) {
        resetAccessCount();
    }
    int lookup = getLookup(pos);
    if (lookup == -1) {
        return null; // not cached
    }
    accessTable[lookup] = accessCount++; // record most-recent access
    return (CachedObject) objectValueTable[lookup];
}
Returns a row if in memory cache .
31,752
/**
 * Adds a row to the cache, evicting first (cleanUp) when either the row-count or
 * byte-size capacity would be exceeded.
 */
synchronized void put(int key, CachedObject row) {
    int storageSize = row.getStorageSize();
    if (size() >= capacity || storageSize + cacheBytesLength > bytesCapacity) {
        cleanUp(); // make room before inserting
    }
    // Avoid wrap-around of the access counter.
    if (accessCount == Integer.MAX_VALUE) {
        super.resetAccessCount();
    }
    super.addOrRemove(key, row, false);
    row.setInMemory(true);
    cacheBytesLength += storageSize; // track cached bytes for the size cap
}
Adds a row to the cache .
31,753
/**
 * Removes an object from the memory cache; does not release its file storage.
 *
 * @return the removed object, or null when it was not cached
 */
synchronized CachedObject release(int i) {
    final CachedObject removed = (CachedObject) super.addOrRemove(i, null, true);
    if (removed == null) {
        return null;
    }
    cacheBytesLength -= removed.getStorageSize();
    removed.setInMemory(false);
    return removed;
}
Removes an object from memory cache . Does not release the file storage .
31,754
/**
 * Writes out all modified cached rows: collects every row whose hasChanged() flag
 * is set into rowTable, hands them to saveRows, then prints timing statistics.
 */
synchronized void saveAll() {
    Iterator it = new BaseHashIterator();
    int savecount = 0;
    for (; it.hasNext();) {
        CachedObject r = (CachedObject) it.next();
        if (r.hasChanged()) {
            rowTable[savecount++] = r; // stage dirty rows for the batch write
        }
    }
    saveRows(savecount);
    // Diagnostic timing output only.
    Error.printSystemOut(saveAllTimer.elapsedTimeToMessage("Cache.saveRow() total row save time"));
    Error.printSystemOut("Cache.saveRow() total row save count = " + saveRowCount);
    Error.printSystemOut(makeRowTimer.elapsedTimeToMessage("Cache.makeRow() total row load time"));
    Error.printSystemOut("Cache.makeRow() total row load count = " + makeRowCount);
    Error.printSystemOut(sortTimer.elapsedTimeToMessage("Cache.sort() total time"));
}
Writes out all modified cached Rows .
31,755
// Registers one aggregate on this plan node: its type, DISTINCT flag (stored as 1/0),
// output column index, and input expression. Nullary aggregates (e.g. COUNT(*)) carry
// a null input expression; all others must supply one, which is cloned defensively.
public void addAggregate ( ExpressionType aggType , boolean isDistinct , Integer aggOutputColumn , AbstractExpression aggInputExpr ) {
    m_aggregateTypes.add(aggType);
    // Encode the DISTINCT flag as 1/0 to match the serialized representation.
    m_aggregateDistinct.add(isDistinct ? 1 : 0);
    m_aggregateOutputColumns.add(aggOutputColumn);
    if (aggType.isNullary()) {
        assert (aggInputExpr == null);
        m_aggregateExpressions.add(null);
    } else {
        assert (aggInputExpr != null);
        // Clone so later mutation of the caller's expression cannot affect this node.
        m_aggregateExpressions.add(aggInputExpr.clone());
    }
}
Add an aggregate to this plan node .
31,756
// Builds a serial AggregatePlanNode configured from the given hash aggregate node.
public static AggregatePlanNode convertToSerialAggregatePlanNode ( HashAggregatePlanNode hashAggregateNode ) {
    return setAggregatePlanNode(hashAggregateNode, new AggregatePlanNode());
}
Convert HashAggregate into a Serialized Aggregate
31,757
// Builds a PartialAggregatePlanNode from the given hash aggregate node and installs
// the supplied group-by column indexes on it.
public static AggregatePlanNode convertToPartialAggregatePlanNode ( HashAggregatePlanNode hashAggregateNode , List < Integer > aggrColumnIdxs ) {
    AggregatePlanNode partialAggr = setAggregatePlanNode(hashAggregateNode, new PartialAggregatePlanNode());
    partialAggr.m_partialGroupByColumns = aggrColumnIdxs;
    return partialAggr;
}
Convert HashAggregate into a Partial Aggregate
31,758
// Retrieve the SQLState code for the error that generated this exception.
public String getSQLState ( ) {
    // UTF-8 support is guaranteed by the JVM spec; the Charset overload of the
    // String constructor cannot throw, so the old catch-and-rethrow of
    // UnsupportedEncodingException was dead weight.
    return new String(m_sqlState, java.nio.charset.StandardCharsets.UTF_8);
}
Retrieve the SQLState code for the error that generated this exception .
31,759
// Aggregates one host's check results from the VoltTable row: ret[0] concatenates any
// failed kit/root/xdcr check messages (or SUCCESS when all passed); ret[1] carries the
// WARNINGS column value, if any.
private static String [ ] aggregatePerHostResults ( VoltTable vtable ) { String [ ] ret = new String [ 2 ] ; vtable . advanceRow ( ) ; String kitCheckResult = vtable . getString ( "KIT_CHECK_RESULT" ) ; String rootCheckResult = vtable . getString ( "ROOT_CHECK_RESULT" ) ; String xdcrCheckResult = vtable . getString ( "XDCR_CHECK_RESULT" ) ; StringBuilder result = new StringBuilder ( ) ; if ( ! kitCheckResult . equals ( SUCCESS ) ) { result . append ( kitCheckResult ) . append ( "\n" ) ; } if ( ! rootCheckResult . equals ( SUCCESS ) ) { result . append ( rootCheckResult ) . append ( "\n" ) ; } if ( ! xdcrCheckResult . equals ( SUCCESS ) ) { result . append ( xdcrCheckResult ) ; } if ( result . length ( ) == 0 ) { result . append ( SUCCESS ) ; } ret [ 0 ] = result . toString ( ) ; String warnings = vtable . getString ( "WARNINGS" ) ; if ( warnings != null ) { ret [ 1 ] = warnings ; } return ret ; }
Be user - friendly return reasons of all failed checks .
31,760
// Parses a JDBC-format timestamp string ("yyyy-[m]m-[d]d hh:mm:ss[.f...]") and
// returns epoch milliseconds. Inputs whose fractional seconds are finer than one
// millisecond are rejected with IllegalArgumentException.
public static long millisFromJDBCformat ( String param ) {
    java.sql.Timestamp parsed = java.sql.Timestamp.valueOf(param);
    long nanos = parsed.getNanos();
    // Anything below millisecond resolution cannot survive the conversion.
    if (nanos % 1000000 != 0) {
        throw new IllegalArgumentException("Can't convert from String to Date with fractional milliseconds");
    }
    return parsed.getTime();
}
Given a string parseable by the JDBC Timestamp parser return the fractional component in milliseconds .
31,761
// Orders TimestampType values like java.util.Date: by the date component first, then
// by the microsecond component as a tiebreaker.
public int compareTo ( TimestampType dateval ) {
    int comp = m_date.compareTo(dateval.m_date);
    if (comp != 0) {
        return comp;
    }
    // Integer.compare is sign-compatible with the old subtraction but immune to
    // overflow, and compareTo contracts only require the sign anyway.
    return Integer.compare(m_usecs, dateval.m_usecs);
}
CompareTo - to mimic Java Date
31,762
// Returns this timestamp as a freshly constructed java.sql.Date (mutating the result
// cannot affect this object). Rejects values carrying a microsecond component, which
// java.sql.Date cannot represent.
public java . sql . Date asExactJavaSqlDate ( ) { if ( m_usecs != 0 ) { throw new RuntimeException ( "Can't convert to sql Date from TimestampType with fractional milliseconds" ) ; } return new java . sql . Date ( m_date . getTime ( ) ) ; }
Retrieve a properly typed copy of the Java date for a TimeStamp with millisecond granularity . The returned date is a copy ; this object will not be affected by modifications of the returned instance .
31,763
// Returns a new java.sql.Timestamp copy of this value, folding the microsecond
// component into the Timestamp's nanosecond field.
public java . sql . Timestamp asJavaTimestamp ( ) { java . sql . Timestamp result = new java . sql . Timestamp ( m_date . getTime ( ) ) ; result . setNanos ( result . getNanos ( ) + m_usecs * 1000 ) ; return result ; }
Retrieve a properly typed copy of the Java Timestamp for the VoltDB TimeStamp . The returned Timestamp is a copy ; this object will not be affected by modifications of the returned instance .
31,764
// Loads the database properties file (no-op returning true for non-file-based
// databases). Returns false when the file does not exist. Rejects files written by a
// newer, incompatible release, applies the legacy 1.6.0 cache-version workaround, and
// configures the runtime GC frequency.
public boolean load ( ) { boolean exists ; if ( ! DatabaseURL . isFileBasedDatabaseType ( database . getType ( ) ) ) { return true ; } try { exists = super . load ( ) ; } catch ( Exception e ) { throw Error . error ( ErrorCode . FILE_IO_ERROR , ErrorCode . M_LOAD_SAVE_PROPERTIES , new Object [ ] { fileName , e } ) ; } if ( ! exists ) { return false ; } filterLoadedProperties ( ) ; String version = getProperty ( hsqldb_compatible_version ) ; int check = version . substring ( 0 , 5 ) . compareTo ( THIS_VERSION ) ; if ( check > 0 ) { throw Error . error ( ErrorCode . WRONG_DATABASE_FILE_VERSION ) ; } version = getProperty ( db_version ) ; if ( version . charAt ( 2 ) == '6' ) { setProperty ( hsqldb_cache_version , "1.6.0" ) ; } JavaSystem . gcFrequency = getIntegerProperty ( runtime_gc_interval , 0 ) ; return true ; }
Loads the properties file for file-based databases, validating its version. Returns false when the properties file did not exist.
31,765
// Applies loaded property values to the database object: read-only flags, strict
// size enforcement, and (legacy) locale-based collation, then clears the meta-dirty
// flag so the just-applied state is not re-persisted.
public void setDatabaseVariables ( ) { if ( isPropertyTrue ( db_readonly ) ) { database . setReadOnly ( ) ; } if ( isPropertyTrue ( hsqldb_files_readonly ) ) { database . setFilesReadOnly ( ) ; } database . sqlEnforceStrictSize = isPropertyTrue ( sql_enforce_strict_size ) ; if ( isPropertyTrue ( sql_compare_in_locale ) ) { stringProps . remove ( sql_compare_in_locale ) ; database . collation . setCollationAsLocale ( ) ; } database . setMetaDirty ( false ) ; }
Sets the database member variables after creating the properties object, opening a properties file, or changing a property with a command.
31,766
// Overrides stored database properties with any supplied on the connection URL.
// Only db_readonly, or properties whose meta entry is flagged SET_PROPERTY, may be
// overridden; everything else on the URL (passwords etc.) is ignored and not stored.
public void setURLProperties ( HsqlProperties p ) { if ( p != null ) { for ( Enumeration e = p . propertyNames ( ) ; e . hasMoreElements ( ) ; ) { String propertyName = ( String ) e . nextElement ( ) ; Object [ ] row = ( Object [ ] ) meta . get ( propertyName ) ; if ( row != null && ( db_readonly . equals ( propertyName ) || ( ( Integer ) row [ indexType ] ) . intValue ( ) == SET_PROPERTY ) ) { setProperty ( propertyName , p . getProperty ( propertyName ) ) ; } } } }
Overrides file database properties with any properties passed on the URL line; does not store passwords etc.
31,767
// Drains the submission queue and runs every task found. When block is true, waits
// for at least one task to arrive before draining; otherwise returns immediately if
// the queue is empty.
private void runSubmissions ( boolean block ) throws InterruptedException {
    // take() blocks for the first task; poll() never blocks.
    Runnable task = block ? m_submissionQueue.take() : m_submissionQueue.poll();
    while (task != null) {
        task.run();
        task = m_submissionQueue.poll();
    }
}
Runs queued submissions, optionally blocking for the next one to arrive if there is no other work to do.
31,768
// Queues a task (run on the notification thread) that records a pending notification
// for every connection whose handle manager passes the predicate. Per connection the
// pending value is either a single Supplier or an immutable linked Node list; a
// notification already pending on that connection is skipped, and Nodes are interned
// through m_cachedNodes so identical suffix lists are shared across connections.
public void queueNotification ( final Collection < ClientInterfaceHandleManager > connections , final Supplier < DeferredSerialization > notification , final Predicate < ClientInterfaceHandleManager > wantsNotificationPredicate ) { m_submissionQueue . offer ( new Runnable ( ) { public void run ( ) { for ( ClientInterfaceHandleManager cihm : connections ) { if ( ! wantsNotificationPredicate . apply ( cihm ) ) continue ; final Connection c = cihm . connection ; Object pendingNotifications = m_clientsPendingNotification . get ( c ) ; try { if ( pendingNotifications == null ) { m_clientsPendingNotification . put ( c , notification ) ; } else if ( pendingNotifications instanceof Supplier ) { if ( pendingNotifications == notification ) return ; @ SuppressWarnings ( "unchecked" ) Node n1 = new Node ( ( Supplier < DeferredSerialization > ) pendingNotifications , null ) ; n1 = m_cachedNodes . get ( n1 , n1 ) ; Node n2 = new Node ( notification , n1 ) ; n2 = m_cachedNodes . get ( n2 , n2 ) ; m_clientsPendingNotification . put ( c , n2 ) ; } else { Node head = ( Node ) pendingNotifications ; boolean dup = false ; while ( head != null ) { if ( head . notification == notification ) { dup = true ; break ; } head = head . next ; } if ( dup ) continue ; Node replacement = new Node ( notification , ( Node ) pendingNotifications ) ; replacement = m_cachedNodes . get ( replacement , replacement ) ; m_clientsPendingNotification . put ( c , replacement ) ; } } catch ( ExecutionException e ) { VoltDB . crashLocalVoltDB ( "Unexpected exception pushing client notifications" , true , Throwables . getRootCause ( e ) ) ; } } } } ) ; }
The collection will be filtered to exclude non VoltPort connections
31,769
// Returns the performance counter with the given name, creating and registering a
// fresh (non-cumulative) one on first use.
public PerfCounter get ( String counter ) {
    // computeIfAbsent does a single map lookup instead of containsKey + put + get,
    // and creates the counter atomically if Counters is a concurrent map.
    return this.Counters.computeIfAbsent(counter, name -> new PerfCounter(false));
}
Gets a performance counter .
31,770
// Records one call against the named counter: its execution duration and whether it
// succeeded. Creates the counter on first use via get().
public void update ( String counter , long executionDuration , boolean success ) { this . get ( counter ) . update ( executionDuration , success ) ; }
Tracks a generic call execution by reporting the execution duration . This method should be used for successful calls only .
31,771
// Renders every counter as one line of delimiter-separated values:
// "<counter name><delimiter><raw counter stats>\n", concatenated in map order.
public String toRawString ( char delimiter ) {
    StringBuilder out = new StringBuilder();
    for (Entry<String, PerfCounter> entry : Counters.entrySet()) {
        out.append(entry.getKey());
        out.append(delimiter);
        out.append(entry.getValue().toRawString(delimiter));
        out.append('\n');
    }
    return out.toString();
}
Gets the statistics as delimiter separated strings . Each line contains statistics for a single procedure . There might be multiple lines .
31,772
// ZK-based leader election for the snapshot truncation daemon. Watches the master
// node; if absent, tries to create it as an ephemeral node and on success becomes the
// truncation leader (re-activating the last known schedule if any). When the current
// leader's ephemeral node is deleted, the election re-runs on the daemon executor.
// A lost create race (NodeExistsException) simply loops and re-checks. Any other ZK
// failure crashes the local node.
private void leaderElection ( ) { loggingLog . info ( "Starting leader election for snapshot truncation daemon" ) ; try { while ( true ) { Stat stat = m_zk . exists ( VoltZK . snapshot_truncation_master , new Watcher ( ) { public void process ( WatchedEvent event ) { switch ( event . getType ( ) ) { case NodeDeleted : loggingLog . info ( "Detected the snapshot truncation leader's ephemeral node deletion" ) ; m_es . execute ( new Runnable ( ) { public void run ( ) { leaderElection ( ) ; } } ) ; break ; default : break ; } } } ) ; if ( stat == null ) { try { m_zk . create ( VoltZK . snapshot_truncation_master , null , Ids . OPEN_ACL_UNSAFE , CreateMode . EPHEMERAL ) ; m_isAutoSnapshotLeader = true ; if ( m_lastKnownSchedule != null ) { makeActivePrivate ( m_lastKnownSchedule ) ; } electedTruncationLeader ( ) ; return ; } catch ( NodeExistsException e ) { } } else { loggingLog . info ( "Leader election concluded, a leader already exists" ) ; break ; } } } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Exception in snapshot daemon electing master via ZK" , true , e ) ; } }
Leader election for snapshots . Leader will watch for truncation and user snapshot requests
31,773
// Asynchronously applies the given snapshot schedule on the daemon's executor,
// making this SnapshotDaemon responsible (or no longer responsible) for generating
// snapshots. The returned future completes when the schedule has been applied.
public ListenableFuture < Void > mayGoActiveOrInactive ( final SnapshotSchedule schedule ) { return m_es . submit ( new Callable < Void > ( ) { public Void call ( ) throws Exception { makeActivePrivate ( schedule ) ; return null ; } } ) ; }
Make this SnapshotDaemon responsible for generating snapshots
31,774
// Periodic state-machine driver. With no known schedule the daemon resets to STARTUP;
// with no frequency unit nothing is scheduled. STARTUP kicks off the initial snapshot
// scan; WAITING processes due snapshots/deletions; SCANNING logs (rate-limited) and
// waits; SNAPSHOTTING and DELETING are no-ops until the in-flight operation finishes.
private void doPeriodicWork ( final long now ) { if ( m_lastKnownSchedule == null ) { setState ( State . STARTUP ) ; return ; } if ( m_frequencyUnit == null ) { return ; } if ( m_state == State . STARTUP ) { initiateSnapshotScan ( ) ; } else if ( m_state == State . SCANNING ) { RateLimitedLogger . tryLogForMessage ( System . currentTimeMillis ( ) , 5 , TimeUnit . MINUTES , SNAP_LOG , Level . INFO , "Blocked in scanning" ) ; return ; } else if ( m_state == State . WAITING ) { processWaitingPeriodicWork ( now ) ; } else if ( m_state == State . SNAPSHOTTING ) { return ; } else if ( m_state == State . DELETING ) { return ; } }
Invoked periodically to drive the snapshot daemon's state machine: kicks off the initial snapshot scan, processes waiting-state work, or does nothing while a scan, snapshot, or delete is already in flight. Responses come back later via invocations of processClientResponse.
31,775
// Waiting-state work, paced so sysproc invocations are at least m_minTimeBetweenSysprocs
// apart. If too many snapshots are retained, deletes the surplus — unless a snapshot is
// currently in progress, in which case the next attempt is pushed ~3s into the future.
// Otherwise starts the next snapshot once its scheduled time has passed.
private void processWaitingPeriodicWork ( long now ) { if ( now - m_lastSysprocInvocation < m_minTimeBetweenSysprocs ) { return ; } if ( m_snapshots . size ( ) > m_retain ) { if ( ! SnapshotSiteProcessor . ExecutionSitesCurrentlySnapshotting . isEmpty ( ) ) { m_lastSysprocInvocation = System . currentTimeMillis ( ) + 3000 ; return ; } deleteExtraSnapshots ( ) ; return ; } if ( m_nextSnapshotTime < now ) { initiateNextSnapshot ( now ) ; return ; } }
Do periodic work when the daemon is in the waiting state. The daemon paces out sysproc invocations over time to avoid disrupting regular work. If there are too many snapshots being retained, it attempts to delete the extras; otherwise, if the time for the next snapshot has passed, it attempts to initiate a new snapshot.
31,776
// Completes a daemon-initiated sysproc invocation on the daemon executor: looks up the
// registered callback by client handle and invokes it. Failures are logged, never
// propagated. NOTE(review): an unknown handle makes remove() return null and the
// resulting NPE is swallowed by the catch — confirm that is the intended handling.
public Future < Void > processClientResponse ( final Callable < ClientResponseImpl > response ) { return m_es . submit ( new Callable < Void > ( ) { public Void call ( ) throws Exception { try { ClientResponseImpl resp = response . call ( ) ; long handle = resp . getClientHandle ( ) ; m_procedureCallbacks . remove ( handle ) . clientCallback ( resp ) ; } catch ( Exception e ) { SNAP_LOG . warn ( "Error when SnapshotDaemon invoked callback for a procedure invocation" , e ) ; } return null ; } } ) ; }
Process responses to sysproc invocations generated by this daemon via processPeriodicWork
31,777
// Handles the response to an auto-snapshot request: returns to WAITING and schedules
// the next snapshot time (clamped so an overdue snapshot fires immediately). On any
// failure — error status, failed request, or a per-host save feasibility failure —
// the failure is logged and the just-recorded snapshot is dropped from the retention
// list so it is not counted.
private void processSnapshotResponse ( ClientResponse response ) { setState ( State . WAITING ) ; final long now = System . currentTimeMillis ( ) ; m_nextSnapshotTime += m_frequencyInMillis ; if ( m_nextSnapshotTime < now ) { m_nextSnapshotTime = now - 1 ; } if ( response . getStatus ( ) != ClientResponse . SUCCESS ) { logFailureResponse ( "Snapshot failed" , response ) ; return ; } final VoltTable results [ ] = response . getResults ( ) ; final VoltTable result = results [ 0 ] ; final String err = SnapshotUtil . didSnapshotRequestFailWithErr ( results ) ; if ( err != null ) { SNAP_LOG . warn ( "Snapshot failed with failure response: " + err ) ; m_snapshots . removeLast ( ) ; return ; } boolean success = true ; while ( result . advanceRow ( ) ) { if ( ! result . getString ( "RESULT" ) . equals ( "SUCCESS" ) ) { success = false ; SNAP_LOG . warn ( "Snapshot save feasibility test failed for host " + result . getLong ( "HOST_ID" ) + " table " + result . getString ( "TABLE" ) + " with error message " + result . getString ( "ERR_MSG" ) ) ; } } if ( ! success ) { m_snapshots . removeLast ( ) ; } }
Confirm and log that the snapshot was a success
31,778
// Handles the response to a snapshot delete. Always transitions back to WAITING —
// even on failure — so snapshotting continues rather than stalling on delete errors
// (e.g. an admin mistake or a bug); failures are only logged.
private void processDeleteResponse ( ClientResponse response ) { setState ( State . WAITING ) ; if ( response . getStatus ( ) != ClientResponse . SUCCESS ) { logFailureResponse ( "Delete of snapshots failed" , response ) ; return ; } final VoltTable results [ ] = response . getResults ( ) ; final String err = SnapshotUtil . didSnapshotRequestFailWithErr ( results ) ; if ( err != null ) { SNAP_LOG . warn ( "Snapshot delete failed with failure response: " + err ) ; return ; } }
Process a response to a request to delete snapshots . Always transitions to the waiting state even if the delete fails . This ensures the system will continue to snapshot until the disk is full in the event that there is an administration error or a bug .
31,779
// Handles the initial @SnapshotScan response: returns to WAITING, then collects the
// snapshots that live under this daemon's path and whose nonce carries the auto-
// snapshot prefix, sorts them (oldest first), and deletes any beyond the retention
// count. A single-table result signals a scan failure and is logged.
private void processScanResponse ( ClientResponse response ) { setState ( State . WAITING ) ; if ( response . getStatus ( ) != ClientResponse . SUCCESS ) { logFailureResponse ( "Initial snapshot scan failed" , response ) ; return ; } final VoltTable results [ ] = response . getResults ( ) ; if ( results . length == 1 ) { final VoltTable result = results [ 0 ] ; boolean advanced = result . advanceRow ( ) ; assert ( advanced ) ; assert ( result . getColumnCount ( ) == 1 ) ; assert ( result . getColumnType ( 0 ) == VoltType . STRING ) ; SNAP_LOG . warn ( "Initial snapshot scan failed with failure response: " + result . getString ( "ERR_MSG" ) ) ; return ; } assert ( results . length == 3 ) ; final VoltTable snapshots = results [ 0 ] ; assert ( snapshots . getColumnCount ( ) == 10 ) ; final File myPath = new File ( m_path ) ; while ( snapshots . advanceRow ( ) ) { final String path = snapshots . getString ( "PATH" ) ; final File pathFile = new File ( path ) ; if ( pathFile . equals ( myPath ) ) { final String nonce = snapshots . getString ( "NONCE" ) ; if ( nonce . startsWith ( m_prefixAndSeparator ) ) { final Long txnId = snapshots . getLong ( "TXNID" ) ; m_snapshots . add ( new Snapshot ( path , SnapshotPathType . SNAP_AUTO , nonce , txnId ) ) ; } } } java . util . Collections . sort ( m_snapshots ) ; deleteExtraSnapshots ( ) ; }
Process the response to a snapshot scan . Find the snapshots that are managed by this daemon by path and nonce and add it the list . Initiate a delete of any that should not be retained
31,780
// If more snapshots are tracked than the retention count allows, transitions to
// DELETING and issues one @SnapshotDelete covering the oldest surplus snapshots
// (polled from the head of the list); otherwise returns to WAITING.
private void deleteExtraSnapshots ( ) { if ( m_snapshots . size ( ) <= m_retain ) { setState ( State . WAITING ) ; } else { m_lastSysprocInvocation = System . currentTimeMillis ( ) ; setState ( State . DELETING ) ; final int numberToDelete = m_snapshots . size ( ) - m_retain ; String pathsToDelete [ ] = new String [ numberToDelete ] ; String noncesToDelete [ ] = new String [ numberToDelete ] ; for ( int ii = 0 ; ii < numberToDelete ; ii ++ ) { final Snapshot s = m_snapshots . poll ( ) ; pathsToDelete [ ii ] = s . path ; noncesToDelete [ ii ] = s . nonce ; SNAP_LOG . info ( "Snapshot daemon deleting " + s . nonce ) ; } Object params [ ] = new Object [ ] { pathsToDelete , noncesToDelete , SnapshotPathType . SNAP_AUTO . toString ( ) } ; long handle = m_nextCallbackHandle ++ ; m_procedureCallbacks . put ( handle , new ProcedureCallback ( ) { public void clientCallback ( final ClientResponse clientResponse ) throws Exception { processClientResponsePrivate ( clientResponse ) ; } } ) ; m_initiator . initiateSnapshotDaemonWork ( "@SnapshotDelete" , handle , params ) ; } }
Check if there are extra snapshots and initiate deletion
31,781
// Creates the ZK node for a snapshot request and, for non-truncation requests,
// registers a watch to deliver the response to the client connection. Truncation
// requests are acknowledged immediately with a queued-success response. If a request
// node already exists, a ForwardClientException with a failure table is thrown.
public void createAndWatchRequestNode ( final long clientHandle , final Connection c , SnapshotInitiationInfo snapInfo , boolean notifyChanges ) throws ForwardClientException { boolean requestExists = false ; final String requestId = createRequestNode ( snapInfo ) ; if ( requestId == null ) { requestExists = true ; } else { if ( ! snapInfo . isTruncationRequest ( ) ) { try { registerUserSnapshotResponseWatch ( requestId , clientHandle , c , notifyChanges ) ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Failed to register ZK watch on snapshot response" , true , e ) ; } } else { VoltTable result = SnapshotUtil . constructNodeResultsTable ( ) ; result . addRow ( - 1 , CoreUtils . getHostnameOrAddress ( ) , "" , "SUCCESS" , "SNAPSHOT REQUEST QUEUED" ) ; final ClientResponseImpl resp = new ClientResponseImpl ( ClientResponseImpl . SUCCESS , new VoltTable [ ] { result } , "User-requested truncation snapshot successfully queued for execution." , clientHandle ) ; ByteBuffer buf = ByteBuffer . allocate ( resp . getSerializedSize ( ) + 4 ) ; buf . putInt ( buf . capacity ( ) - 4 ) ; resp . flattenToBuffer ( buf ) . flip ( ) ; c . writeStream ( ) . enqueue ( buf ) ; } } if ( requestExists ) { VoltTable result = SnapshotUtil . constructNodeResultsTable ( ) ; result . addRow ( - 1 , CoreUtils . getHostnameOrAddress ( ) , "" , "FAILURE" , "SNAPSHOT IN PROGRESS" ) ; throw new ForwardClientException ( "A request to perform a user snapshot already exists" , result ) ; } }
Try to create the ZK request node and watch it if created successfully .
31,782
// Creates the ZK node representing the snapshot request: a persistent node carrying
// the request JSON for user snapshots, or a persistent-sequential marker node for
// truncation requests. Returns the generated request id, or null when a user
// snapshot request node already exists. Other ZK failures crash the local node.
private String createRequestNode ( SnapshotInitiationInfo snapInfo ) { String requestId = null ; try { requestId = java . util . UUID . randomUUID ( ) . toString ( ) ; if ( ! snapInfo . isTruncationRequest ( ) ) { final JSONObject jsObj = snapInfo . getJSONObjectForZK ( ) ; jsObj . put ( "requestId" , requestId ) ; String zkString = jsObj . toString ( 4 ) ; byte zkBytes [ ] = zkString . getBytes ( "UTF-8" ) ; m_zk . create ( VoltZK . user_snapshot_request , zkBytes , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; } else { m_zk . create ( VoltZK . request_truncation_snapshot_node , null , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT_SEQUENTIAL ) ; } } catch ( KeeperException . NodeExistsException e ) { return null ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Exception while attempting to create user snapshot request in ZK" , true , e ) ; } return requestId ; }
Try to create the ZK node to request the snapshot .
31,783
// Shortcut for deserializing a single object of the expected type straight from its
// byte-array representation.
public final static < T extends FastSerializable > T deserialize ( final byte [ ] data , final Class < T > expectedType ) throws IOException {
    return new FastDeserializer(data).readObject(expectedType);
}
Read an object from its byte array representation . This is a shortcut utility method useful when only a single object needs to be deserialized .
31,784
// Instantiates expectedType via its public no-arg constructor and populates it from
// this stream. NOTE(review): reflection failures are only printed via
// printStackTrace and result in a null return — callers must tolerate null;
// consider propagating the failure instead.
public < T extends FastSerializable > T readObject ( final Class < T > expectedType ) throws IOException { assert ( expectedType != null ) ; T obj = null ; try { obj = expectedType . newInstance ( ) ; obj . readExternal ( this ) ; } catch ( final InstantiationException e ) { e . printStackTrace ( ) ; } catch ( final IllegalAccessException e ) { e . printStackTrace ( ) ; } return obj ; }
Read an object from a byte array stream, assuming the expected type is known.
31,785
// Populates the provided instance from this stream and returns it. When a monitor is
// supplied, it is notified of how many bytes the read consumed (measured by buffer
// position delta).
public FastSerializable readObject ( final FastSerializable obj , final DeserializationMonitor monitor ) throws IOException { final int startPosition = buffer . position ( ) ; obj . readExternal ( this ) ; final int endPosition = buffer . position ( ) ; if ( monitor != null ) { monitor . deserializedBytes ( endPosition - startPosition ) ; } return obj ; }
Read an object from a byte array stream into the provided instance. Takes a deserialization monitor which is notified of how many bytes were deserialized.
31,786
// Reads a VoltDB wire-format string from the buffer without wrapping it: a 4-byte
// length prefix (-1 encodes SQL NULL) followed by that many bytes of UTF-8 data.
// Throws IOException for negative or over-long lengths.
public static String readString ( ByteBuffer buffer ) throws IOException {
    final int NULL_STRING_INDICATOR = -1;
    final int len = buffer.getInt();
    if (len == NULL_STRING_INDICATOR) return null;
    // Validate before allocating: in the original, a length below -1 tripped the
    // `assert len >= 0` (AssertionError under -ea) before the intended IOException
    // could be thrown. Check order is now: negative -> too long -> read.
    if (len < NULL_STRING_INDICATOR) {
        throw new IOException("String length is negative " + len);
    }
    if (len > VoltType.MAX_VALUE_LENGTH) {
        throw new IOException("Serializable strings cannot be longer then " + VoltType.MAX_VALUE_LENGTH + " bytes");
    }
    final byte[] strbytes = new byte[len];
    buffer.get(strbytes);
    // StandardCharsets.UTF_8 cannot throw, removing the old swallowed
    // UnsupportedEncodingException path that could return null on failure.
    return new String(strbytes, java.nio.charset.StandardCharsets.UTF_8);
}
Read a string in the standard VoltDB way without wrapping the byte buffer [
31,787
// Reads a string in the standard VoltDB format: a 4-byte length (NULL_STRING_LENGTH
// encodes SQL NULL) followed by UTF-8 bytes. The length is validated as non-negative
// and within the remaining buffer before any allocation.
public String readString ( ) throws IOException { final int len = readInt ( ) ; if ( len == VoltType . NULL_STRING_LENGTH ) { return null ; } if ( len < VoltType . NULL_STRING_LENGTH ) { throw new IOException ( "String length is negative " + len ) ; } if ( len > buffer . remaining ( ) ) { throw new IOException ( "String length is bigger than total buffer " + len ) ; } final byte [ ] strbytes = new byte [ len ] ; readFully ( strbytes ) ; return new String ( strbytes , Constants . UTF8ENCODING ) ; }
Read a string in the standard VoltDB way . That is four bytes of length info followed by the bytes of characters encoded in UTF - 8 .
31,788
// Copies the next byteLen bytes of the underlying buffer into an independent
// ByteBuffer (advancing the underlying buffer's position).
public ByteBuffer readBuffer ( final int byteLen ) {
    final byte[] copy = new byte[byteLen];
    buffer.get(copy);
    return ByteBuffer.wrap(copy);
}
Create a copy of the first byteLen bytes of the underlying buffer .
31,789
// Removes the XML element for the named user-defined function from the schema, if
// present, and records the function as dropped in the tracker. Returns true when an
// element was removed, false when no matching function was found.
private boolean removeUDFInSchema ( String functionName ) { for ( int idx = 0 ; idx < m_schema . children . size ( ) ; idx ++ ) { VoltXMLElement func = m_schema . children . get ( idx ) ; if ( "ud_function" . equals ( func . name ) ) { String fnm = func . attributes . get ( "name" ) ; if ( fnm != null && functionName . equals ( fnm ) ) { m_schema . children . remove ( idx ) ; m_tracker . addDroppedFunction ( functionName ) ; m_logger . debug ( String . format ( "Removed XML for" + " function named %s" , functionName ) ) ; return true ; } } } return false ; }
Remove the function with the given name from the VoltXMLElement schema if it is there already .
31,790
// Serializes this element tree as conventional XML, prepending the standard XML
// declaration and delegating the body to the recursive toXML(StringBuilder, depth).
public String toXML ( ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n" ) ; toXML ( sb , 0 ) ; return sb . toString ( ) ; }
Convert VoltXML to more conventional XML .
31,791
// Collects, in document order, every descendant element (at any depth) whose name
// matches the given name.
public List < VoltXMLElement > findChildrenRecursively ( String name ) {
    List<VoltXMLElement> matches = new ArrayList<>();
    for (int i = 0; i < children.size(); i++) {
        VoltXMLElement child = children.get(i);
        if (name.equals(child.name)) {
            matches.add(child);
        }
        // Depth-first: a matching child's own matching descendants follow it.
        matches.addAll(child.findChildrenRecursively(name));
    }
    return matches;
}
Given a name recursively find all the children with matching name if any .
31,792
// Returns all immediate children whose name matches, preserving document order;
// returns an empty list when none match.
public List < VoltXMLElement > findChildren ( String name ) {
    List<VoltXMLElement> matches = new ArrayList<>();
    for (int i = 0; i < children.size(); i++) {
        VoltXMLElement child = children.get(i);
        if (name.equals(child.name)) {
            matches.add(child);
        }
    }
    return matches;
}
Given a name find all the immediate children with matching name if any .
31,793
// Returns the first immediate child whose unique name (as produced by getUniqueName)
// equals the given value, or null when there is no match.
public VoltXMLElement findChild ( String uniqueName ) {
    for (int i = 0; i < children.size(); i++) {
        VoltXMLElement candidate = children.get(i);
        if (uniqueName.equals(candidate.getUniqueName())) {
            return candidate;
        }
    }
    return null;
}
Given an value in the format of that returned by getUniqueName find the child element which matches if any .
31,794
// Computes the VoltXMLDiff that turns `before` into `after`: records the after-tree
// element order, added/removed/changed attributes, added/removed child elements, and
// recursive child diffs for children present (by unique name) in both trees. Returns
// null when the two roots have different unique names, and an empty diff when the
// minimized string forms already match.
static public VoltXMLDiff computeDiff ( VoltXMLElement before , VoltXMLElement after ) { if ( ! before . getUniqueName ( ) . equals ( after . getUniqueName ( ) ) ) { return null ; } VoltXMLDiff result = new VoltXMLDiff ( before . getUniqueName ( ) ) ; if ( before . toMinString ( ) . equals ( after . toMinString ( ) ) ) { return result ; } for ( int i = 0 ; i < after . children . size ( ) ; i ++ ) { VoltXMLElement child = after . children . get ( i ) ; result . m_elementOrder . put ( child . getUniqueName ( ) , i ) ; } Set < String > firstKeys = before . attributes . keySet ( ) ; Set < String > secondKeys = new HashSet < > ( ) ; secondKeys . addAll ( after . attributes . keySet ( ) ) ; for ( String firstKey : firstKeys ) { if ( ! secondKeys . contains ( firstKey ) ) { result . m_removedAttributes . add ( firstKey ) ; } else if ( ! ( after . attributes . get ( firstKey ) . equals ( before . attributes . get ( firstKey ) ) ) ) { result . m_changedAttributes . put ( firstKey , after . attributes . get ( firstKey ) ) ; } secondKeys . remove ( firstKey ) ; } for ( String key : secondKeys ) { result . m_addedAttributes . put ( key , after . attributes . get ( key ) ) ; } Set < String > firstChildren = new HashSet < > ( ) ; for ( VoltXMLElement child : before . children ) { firstChildren . add ( child . getUniqueName ( ) ) ; } Set < String > secondChildren = new HashSet < > ( ) ; for ( VoltXMLElement child : after . children ) { secondChildren . add ( child . getUniqueName ( ) ) ; } Set < String > commonNames = new HashSet < > ( ) ; for ( VoltXMLElement firstChild : before . children ) { if ( ! secondChildren . contains ( firstChild . getUniqueName ( ) ) ) { result . m_removedElements . add ( firstChild ) ; } else { commonNames . add ( firstChild . getUniqueName ( ) ) ; } } for ( VoltXMLElement secondChild : after . children ) { if ( ! firstChildren . contains ( secondChild . getUniqueName ( ) ) ) { result . m_addedElements . add ( secondChild ) ; } else { assert ( commonNames . contains ( secondChild . getUniqueName ( ) ) ) ; } } for ( String name : commonNames ) { VoltXMLDiff childDiff = computeDiff ( before . findChild ( name ) , after . findChild ( name ) ) ; if ( ! childDiff . isEmpty ( ) ) { result . m_changedElements . put ( name , childDiff ) ; } } return result ; }
Compute the diff necessary to turn the before tree into the after tree .
31,795
// Recursively collects sub-elements named elementName. When attrName is non-null,
// only elements whose attrName attribute equals attrValue are kept.
public List < VoltXMLElement > extractSubElements ( String elementName , String attrName , String attrValue ) {
    assert (elementName != null);
    // Precondition: supplying an attribute name requires a value to match against.
    // (The original assert redundantly re-tested elementName, obscuring this
    // attrName-implies-attrValue requirement; the condition is logically equivalent
    // given the first assert.)
    assert (attrName == null || attrValue != null);
    List<VoltXMLElement> elements = new ArrayList<>();
    extractSubElements(elementName, attrName, attrValue, elements);
    return elements;
}
Recursively extract sub elements of a given name with matching attribute if it is not null .
31,796
// Maps a character to its hex-digit value. '0'-'9' give 0-9; letters from 'a'/'A'
// onward give their alphabet offset plus 10 (so 'a'-'f' / 'A'-'F' give 10-15, and
// letters past 'f'/'F' give values above 15); characters above 'z' or strictly
// between 'Z' and 'a' give 16; characters below '0' (other than digits) give -1.
static int getHexValue ( int c ) {
    if ('0' <= c && c <= '9') {
        return c - '0';
    }
    if (c > 'z') {
        return 16;
    }
    if (c >= 'a') {
        return c - ('a' - 10);
    }
    if (c > 'Z') {
        return 16;
    }
    if (c >= 'A') {
        return c - ('A' - 10);
    }
    return -1;
}
Returns the value of a hex digit character; returns 16 for most non-hex characters above '9' (letters past 'f'/'F' yield values above 15) and -1 for characters below '0'.
31,797
// Case-insensitively matches the given identifier at the current scan position
// (comparing each source char directly and against its upper-cased form) and
// advances past it on success. Intended only for identifiers that appear inside
// known token sequences.
boolean scanSpecialIdentifier ( String identifier ) { int length = identifier . length ( ) ; if ( limit - currentPosition < length ) { return false ; } for ( int i = 0 ; i < length ; i ++ ) { int character = identifier . charAt ( i ) ; if ( character == sqlString . charAt ( currentPosition + i ) ) { continue ; } if ( character == Character . toUpperCase ( sqlString . charAt ( currentPosition + i ) ) ) { continue ; } return false ; } currentPosition += length ; return true ; }
Only for identifiers that are part of known token sequences
31,798
// Parses the <interval qualifier> portion of an INTERVAL type: a start field with
// optional "(precision[, scale])" — the comma form is only legal for SECOND — then an
// optional "TO end-field" with its own optional "(scale)", which again is only legal
// when the end field is SECOND. Returns the matching IntervalType; all syntax errors
// raise SQLSTATE 22006 (X_22006).
IntervalType scanIntervalType ( ) { int precision = - 1 ; int scale = - 1 ; int startToken ; int endToken ; final int errorCode = ErrorCode . X_22006 ; startToken = endToken = token . tokenType ; scanNext ( errorCode ) ; if ( token . tokenType == Tokens . OPENBRACKET ) { scanNext ( errorCode ) ; if ( token . dataType == null || token . dataType . typeCode != Types . SQL_INTEGER ) { throw Error . error ( errorCode ) ; } precision = ( ( Number ) this . token . tokenValue ) . intValue ( ) ; scanNext ( errorCode ) ; if ( token . tokenType == Tokens . COMMA ) { if ( startToken != Tokens . SECOND ) { throw Error . error ( errorCode ) ; } scanNext ( errorCode ) ; if ( token . dataType == null || token . dataType . typeCode != Types . SQL_INTEGER ) { throw Error . error ( errorCode ) ; } scale = ( ( Number ) token . tokenValue ) . intValue ( ) ; scanNext ( errorCode ) ; } if ( token . tokenType != Tokens . CLOSEBRACKET ) { throw Error . error ( errorCode ) ; } scanNext ( errorCode ) ; } if ( token . tokenType == Tokens . TO ) { scanNext ( errorCode ) ; endToken = token . tokenType ; scanNext ( errorCode ) ; } if ( token . tokenType == Tokens . OPENBRACKET ) { if ( endToken != Tokens . SECOND || endToken == startToken ) { throw Error . error ( errorCode ) ; } scanNext ( errorCode ) ; if ( token . dataType == null || token . dataType . typeCode != Types . SQL_INTEGER ) { throw Error . error ( errorCode ) ; } scale = ( ( Number ) token . tokenValue ) . intValue ( ) ; scanNext ( errorCode ) ; if ( token . tokenType != Tokens . CLOSEBRACKET ) { throw Error . error ( errorCode ) ; } scanNext ( errorCode ) ; } int startIndex = ArrayUtil . find ( Tokens . SQL_INTERVAL_FIELD_CODES , startToken ) ; int endIndex = ArrayUtil . find ( Tokens . SQL_INTERVAL_FIELD_CODES , endToken ) ; return IntervalType . getIntervalType ( startIndex , endIndex , precision , scale ) ; }
Reads the type part of the INTERVAL
31,799
// Converts a datetime or interval literal string — optionally prefixed with
// DATE/TIME/TIMESTAMP/INTERVAL and, for intervals, followed by an interval qualifier —
// to a value of the requested type. Any literal prefix and any scanned qualifier must
// agree with the requested type (SQLSTATE 22007 for datetimes, 22006 for intervals).
// NOTE(review): value range checking is acknowledged by the original author as
// incomplete ("should perform range checks etc.").
public synchronized Object convertToDatetimeInterval ( String s , DTIType type ) { Object value ; IntervalType intervalType = null ; int dateTimeToken = - 1 ; int errorCode = type . isDateTimeType ( ) ? ErrorCode . X_22007 : ErrorCode . X_22006 ; reset ( s ) ; resetState ( ) ; scanToken ( ) ; scanWhitespace ( ) ; switch ( token . tokenType ) { case Tokens . INTERVAL : case Tokens . DATE : case Tokens . TIME : case Tokens . TIMESTAMP : dateTimeToken = token . tokenType ; scanToken ( ) ; if ( token . tokenType != Tokens . X_VALUE || token . dataType . typeCode != Types . SQL_CHAR ) { throw Error . error ( errorCode ) ; } s = token . tokenString ; scanNext ( ErrorCode . X_22007 ) ; if ( type . isIntervalType ( ) ) { intervalType = scanIntervalType ( ) ; } if ( token . tokenType != Tokens . X_ENDPARSE ) { throw Error . error ( errorCode ) ; } default : } switch ( type . typeCode ) { case Types . SQL_DATE : if ( dateTimeToken != - 1 && dateTimeToken != Tokens . DATE ) { throw Error . error ( errorCode ) ; } return newDate ( s ) ; case Types . SQL_TIME : case Types . SQL_TIME_WITH_TIME_ZONE : { if ( dateTimeToken != - 1 && dateTimeToken != Tokens . TIME ) { throw Error . error ( errorCode ) ; } return newTime ( s ) ; } case Types . SQL_TIMESTAMP : case Types . SQL_TIMESTAMP_WITH_TIME_ZONE : { if ( dateTimeToken != - 1 && dateTimeToken != Tokens . TIMESTAMP ) { throw Error . error ( errorCode ) ; } return newTimestamp ( s ) ; } default : if ( dateTimeToken != - 1 && dateTimeToken != Tokens . INTERVAL ) { throw Error . error ( errorCode ) ; } if ( type . isIntervalType ( ) ) { value = newInterval ( s , ( IntervalType ) type ) ; if ( intervalType != null ) { if ( intervalType . startIntervalType != type . startIntervalType || intervalType . endIntervalType != type . endIntervalType ) { throw Error . error ( errorCode ) ; } } return value ; } throw Error . runtimeError ( ErrorCode . U_S0500 , "Scanner" ) ; } }
should perform range checks etc .