idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
32,100
/**
 * Adds an option that has only a short name and takes no argument.
 *
 * @param opt         short option name
 * @param description option description for help text
 * @return this Options instance, for call chaining
 */
public Options addOption(String opt, String description) {
    // Delegate to the three-argument overload with hasArg = false.
    return addOption(opt, false, description);
}
Add an option that only contains a short name . The option does not take an argument .
32,101
/**
 * Adds an option that has only a short name; it may be declared as
 * requiring an argument.
 *
 * @param opt         short option name
 * @param hasArg      whether the option requires an argument
 * @param description option description for help text
 * @return this Options instance, for call chaining
 */
public Options addOption(String opt, boolean hasArg, String description) {
    // No long name: construct the Option directly with a null long form.
    addOption(new Option(opt, null, hasArg, description));
    return this;
}
Add an option that only contains a short - name . It may be specified as requiring an argument .
32,102
/**
 * Adds an option that has both a short and a long name; it may be declared
 * as requiring an argument.
 *
 * @param opt         short option name
 * @param longOpt     long option name
 * @param hasArg      whether the option requires an argument
 * @param description option description for help text
 * @return this Options instance, for call chaining
 */
public Options addOption(String opt, String longOpt, boolean hasArg, String description) {
    Option option = new Option(opt, longOpt, hasArg, description);
    addOption(option);
    return this;
}
Add an option that contains a short - name and a long - name . It may be specified as requiring an argument .
32,103
// Produces a field-by-field copy of this CommandLine (a factory method, despite the
// caption calling it a "copy ctor"). Most fields are shallow-copied; exceptions:
// m_coordinators is snapshotted with ImmutableSortedSet.copyOf, and javaProperties
// (when non-null) is deep-copied entry-by-entry into a fresh TreeMap, so mutating
// the copy's properties cannot affect this instance. Any field not listed here is
// NOT carried over -- new fields must be added to this method by hand.
public CommandLine makeCopy ( ) { CommandLine cl = new CommandLine ( m_startAction ) ; cl . m_ipcPort = m_ipcPort ; cl . m_backend = m_backend ; cl . m_leader = m_leader ; cl . m_pathToCatalog = m_pathToCatalog ; cl . m_pathToDeployment = m_pathToDeployment ; cl . m_pathToLicense = m_pathToLicense ; cl . m_noLoadLibVOLTDB = m_noLoadLibVOLTDB ; cl . m_zkInterface = m_zkInterface ; cl . m_port = m_port ; cl . m_adminPort = m_adminPort ; cl . m_internalPort = m_internalPort ; cl . m_externalInterface = m_externalInterface ; cl . m_internalInterface = m_internalInterface ; cl . m_drAgentPortStart = m_drAgentPortStart ; cl . m_httpPort = m_httpPort ; cl . m_drPublicHost = m_drPublicHost ; cl . m_drPublicPort = m_drPublicPort ; cl . m_deadHostTimeoutMS = m_deadHostTimeoutMS ; cl . m_startMode = m_startMode ; cl . m_selectedRejoinInterface = m_selectedRejoinInterface ; cl . m_quietAdhoc = m_quietAdhoc ; cl . m_timestampTestingSalt = m_timestampTestingSalt ; cl . m_isRejoinTest = m_isRejoinTest ; cl . m_tag = m_tag ; cl . m_vemTag = m_vemTag ; cl . m_versionStringOverrideForTest = m_versionStringOverrideForTest ; cl . m_versionCompatibilityRegexOverrideForTest = m_versionCompatibilityRegexOverrideForTest ; cl . m_buildStringOverrideForTest = m_buildStringOverrideForTest ; cl . m_forceVoltdbCreate = m_forceVoltdbCreate ; cl . m_userSchemas = m_userSchemas ; cl . m_stagedClassesPaths = m_stagedClassesPaths ; cl . includeTestOpts = includeTestOpts ; cl . debugPort = debugPort ; cl . zkport = zkport ; cl . buildDir = buildDir ; cl . volt_root = volt_root ; cl . java_library_path = java_library_path ; cl . rmi_host_name = rmi_host_name ; cl . log4j = log4j ; cl . gcRollover = gcRollover ; cl . voltFilePrefix = voltFilePrefix ; cl . initialHeap = initialHeap ; cl . maxHeap = maxHeap ; cl . classPath = classPath ; cl . javaExecutable = javaExecutable ; cl . jmxPort = jmxPort ; cl . jmxHost = jmxHost ; cl . customCmdLn = customCmdLn ; cl . m_isPaused = m_isPaused ; cl . 
m_meshBrokers = m_meshBrokers ; cl . m_coordinators = ImmutableSortedSet . copyOf ( m_coordinators ) ; cl . m_hostCount = m_hostCount ; cl . m_enableAdd = m_enableAdd ; cl . m_voltdbRoot = m_voltdbRoot ; cl . m_newCli = m_newCli ; cl . m_sslEnable = m_sslEnable ; cl . m_sslExternal = m_sslExternal ; cl . m_sslInternal = m_sslInternal ; cl . m_placementGroup = m_placementGroup ; if ( javaProperties != null ) { cl . javaProperties = new TreeMap < > ( ) ; for ( Entry < String , String > e : javaProperties . entrySet ( ) ) { cl . javaProperties . put ( e . getKey ( ) , e . getValue ( ) ) ; } } cl . m_missingHostCount = m_missingHostCount ; return cl ; }
Copy ctor .
32,104
/**
 * Combines two VoltXMLElements under a new binary "operation" element.
 * If either operand is null, the other is returned directly (null when both
 * are null). Returns null when both operands exist but the operator name or
 * element id is missing.
 *
 * @param opName      operator type attribute value
 * @param opElementId id attribute for the new operation element
 * @param first       left operand (may be null)
 * @param second      right operand (may be null)
 * @return the combined element, a lone operand, or null
 */
public static VoltXMLElement mergeTwoElementsUsingOperator(String opName, String opElementId,
        VoltXMLElement first, VoltXMLElement second) {
    // With at most one operand there is nothing to combine.
    if (first == null) {
        return second;
    }
    if (second == null) {
        return first;
    }
    // Both operands exist, so operator metadata becomes mandatory.
    if (opName == null || opElementId == null) {
        return null;
    }
    VoltXMLElement combined = new VoltXMLElement("operation");
    combined.attributes.put("id", opElementId);
    combined.attributes.put("optype", opName);
    combined.children.add(first);
    combined.children.add(second);
    return combined;
}
If one of the elements is null, return the other one directly .
32,105
/**
 * Builds the VoltXMLElement pair representing a plain "LIMIT n" clause:
 * an empty "offset" element followed by a "limit" element whose child is a
 * BIGINT value element carrying the limit.
 *
 * @param limit               the literal limit value
 * @param limitValueElementId id for the embedded value element; null aborts
 * @return the two-element list, or null when no id was supplied
 */
public static List<VoltXMLElement> buildLimitElements(int limit, String limitValueElementId) {
    if (limitValueElementId == null) {
        return null;
    }
    String limitText = String.valueOf(limit);
    VoltXMLElement limitNode = new VoltXMLElement("limit");
    limitNode.attributes.put("limit", limitText);
    limitNode.children.add(buildValueElement(limitValueElementId, false, limitText, "BIGINT"));
    List<VoltXMLElement> elements = new ArrayList<>();
    elements.add(new VoltXMLElement("offset"));
    elements.add(limitNode);
    return elements;
}
Build VoltXMLElement for expression like LIMIT 1 .
32,106
/**
 * Builds the VoltXMLElement for a join condition of the shape
 * {@code column <op> ?}: a parameter value element is created and merged with
 * the supplied column element under the given operator.
 *
 * @param opName              operator type (e.g. equality)
 * @param leftElement         element for the column side
 * @param valueParamElementId id for the new parameter value element
 * @param opElementId         id for the operation element
 * @return the merged operation element
 */
public static VoltXMLElement buildColumnParamJoincondElement(String opName, VoltXMLElement leftElement,
        String valueParamElementId, String opElementId) {
    return mergeTwoElementsUsingOperator(opName, opElementId, leftElement,
            buildValueElement(valueParamElementId));
}
Build VoltXMLElement for expression like column = ? .
32,107
/**
 * Builds a "parameter" element for insertion under the parameters subtree.
 *
 * @param elementId id attribute value
 * @param index     parameter index attribute value
 * @param valueType parameter value-type attribute value
 * @return the populated parameter element
 */
public static VoltXMLElement buildParamElement(String elementId, String index, String valueType) {
    VoltXMLElement param = new VoltXMLElement("parameter");
    // Attribute insertion order is preserved from the original implementation.
    param.attributes.put("id", elementId);
    param.attributes.put("index", index);
    param.attributes.put("valuetype", valueType);
    return param;
}
Build an element to be inserted under the parameters tree .
32,108
// Rebuilds this index-scan plan node's members from its JSON representation.
// Optional members fall back to defaults when absent (HAS_OFFSET_RANK -> unchanged,
// PURPOSE -> FOR_SCANNING_PERFORMANCE_OR_ORDERING); every other member (lookup type,
// sort direction, target index name, end/initial expressions, search-key expressions,
// compare-not-distinct flags, skip-null predicate) is required and getString/getInt
// will throw JSONException if missing. The catalog Index is re-resolved from the
// Database using super.m_targetTableName, so the superclass load must run first.
public void loadFromJSONObject ( JSONObject jobj , Database db ) throws JSONException { super . loadFromJSONObject ( jobj , db ) ; m_lookupType = IndexLookupType . get ( jobj . getString ( Members . LOOKUP_TYPE . name ( ) ) ) ; m_sortDirection = SortDirectionType . get ( jobj . getString ( Members . SORT_DIRECTION . name ( ) ) ) ; if ( jobj . has ( Members . HAS_OFFSET_RANK . name ( ) ) ) { m_hasOffsetRankOptimization = jobj . getBoolean ( Members . HAS_OFFSET_RANK . name ( ) ) ; } m_purpose = jobj . has ( Members . PURPOSE . name ( ) ) ? jobj . getInt ( Members . PURPOSE . name ( ) ) : FOR_SCANNING_PERFORMANCE_OR_ORDERING ; m_targetIndexName = jobj . getString ( Members . TARGET_INDEX_NAME . name ( ) ) ; m_catalogIndex = db . getTables ( ) . get ( super . m_targetTableName ) . getIndexes ( ) . get ( m_targetIndexName ) ; m_endExpression = AbstractExpression . fromJSONChild ( jobj , Members . END_EXPRESSION . name ( ) , m_tableScan ) ; m_initialExpression = AbstractExpression . fromJSONChild ( jobj , Members . INITIAL_EXPRESSION . name ( ) , m_tableScan ) ; AbstractExpression . loadFromJSONArrayChild ( m_searchkeyExpressions , jobj , Members . SEARCHKEY_EXPRESSIONS . name ( ) , m_tableScan ) ; loadBooleanArrayFromJSONObject ( jobj , Members . COMPARE_NOTDISTINCT . name ( ) , m_compareNotDistinct ) ; m_skip_null_predicate = AbstractExpression . fromJSONChild ( jobj , Members . SKIP_NULL_PREDICATE . name ( ) , m_tableScan ) ; }
all members loaded
32,109
/**
 * Reports whether this scan's predicate can be optimized for aggregation
 * during a reverse scan: exactly one conjunct of the exact shape
 * {@code NOT (expr IS NULL)} while the index lookup runs in LT/LTE order.
 *
 * @return true when the single-predicate reverse-scan pattern is matched
 */
public boolean isPredicatesOptimizableForAggregate() {
    List<AbstractExpression> predicates = ExpressionUtil.uncombinePredicate(m_predicate);
    // Only a single conjunct qualifies.
    if (predicates.size() != 1) {
        return false;
    }
    AbstractExpression top = predicates.get(0);
    // Shape check: NOT wrapping IS NULL (short-circuits before touching getLeft()).
    boolean isNotNullFilter = top.getExpressionType() == ExpressionType.OPERATOR_NOT
            && top.getLeft().getExpressionType() == ExpressionType.OPERATOR_IS_NULL;
    // Applies to reverse-order lookups only.
    boolean isReverseLookup =
            m_lookupType == IndexLookupType.LT || m_lookupType == IndexLookupType.LTE;
    return isNotNullFilter && isReverseLookup;
}
added for reverse scan purpose only
32,110
// Builds a SUCCESS FragmentResponseMessage marked as "recovering" and fills it with
// one dummy buffer dependency (m_rawDummyResponse) per fragment in m_fragmentMsg,
// then delivers it back through m_initiator. This satisfies the coordinator's
// expectation of an output dependency per fragment while this site cannot execute
// real work. The response buffering flag is propagated from m_respBufferable.
private void respondWithDummy ( ) { final FragmentResponseMessage response = new FragmentResponseMessage ( m_fragmentMsg , m_initiator . getHSId ( ) ) ; response . m_sourceHSId = m_initiator . getHSId ( ) ; response . setRecovering ( true ) ; response . setStatus ( FragmentResponseMessage . SUCCESS , null ) ; for ( int frag = 0 ; frag < m_fragmentMsg . getFragmentCount ( ) ; frag ++ ) { final int outputDepId = m_fragmentMsg . getOutputDepId ( frag ) ; response . addDependency ( new DependencyPair . BufferDependencyPair ( outputDepId , m_rawDummyResponse , 0 , m_rawDummyResponse . length ) ) ; } response . setRespBufferable ( m_respBufferable ) ; m_initiator . deliver ( response ) ; }
Respond with a dummy fragment response .
32,111
// Executes every system-procedure plan fragment in m_fragmentMsg against the site
// connection, collecting produced dependencies into a single SUCCESS response.
// Error mapping (each aborts the remaining fragments after adding a dependency):
//   EEException / SQLException / ReplicatedTableException -> UNEXPECTED_ERROR
//     (also logs the hex-encoded fragment plan at TRACE level),
//   SerializableException -> USER_ERROR,
//   VoltAbortException -> USER_ERROR wrapped in a new SerializableException.
// Per-fragment begin/end duration events are emitted when SPSITE tracing is on.
// The DR buffer size is hard-coded to 1 before returning -- presumably a
// placeholder/sentinel value; confirm intent with the DR code path.
public FragmentResponseMessage processFragmentTask ( SiteProcedureConnection siteConnection ) { final FragmentResponseMessage currentFragResponse = new FragmentResponseMessage ( m_fragmentMsg , m_initiator . getHSId ( ) ) ; currentFragResponse . setStatus ( FragmentResponseMessage . SUCCESS , null ) ; for ( int frag = 0 ; frag < m_fragmentMsg . getFragmentCount ( ) ; frag ++ ) { final long fragmentId = VoltSystemProcedure . hashToFragId ( m_fragmentMsg . getPlanHash ( frag ) ) ; final VoltTrace . TraceEventBatch traceLog = VoltTrace . log ( VoltTrace . Category . SPSITE ) ; if ( traceLog != null ) { traceLog . add ( ( ) -> VoltTrace . beginDuration ( "runfragmenttask" , "txnId" , TxnEgo . txnIdToString ( getTxnId ( ) ) , "partition" , Integer . toString ( siteConnection . getCorrespondingPartitionId ( ) ) , "fragmentId" , String . valueOf ( fragmentId ) ) ) ; } ParameterSet params = m_fragmentMsg . getParameterSetForFragment ( frag ) ; try { final DependencyPair dep = siteConnection . executeSysProcPlanFragment ( m_txnState , m_inputDeps , fragmentId , params ) ; if ( dep != null ) { currentFragResponse . addDependency ( dep ) ; } } catch ( final EEException | SQLException | ReplicatedTableException e ) { hostLog . l7dlog ( Level . TRACE , LogKeys . host_ExecutionSite_ExceptionExecutingPF . name ( ) , new Object [ ] { Encoder . hexEncode ( m_fragmentMsg . getFragmentPlan ( frag ) ) } , e ) ; currentFragResponse . setStatus ( FragmentResponseMessage . UNEXPECTED_ERROR , e ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } catch ( final SerializableException e ) { currentFragResponse . setStatus ( FragmentResponseMessage . USER_ERROR , e ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } catch ( final VoltAbortException e ) { currentFragResponse . setStatus ( FragmentResponseMessage . USER_ERROR , new SerializableException ( CoreUtils . 
throwableToString ( e ) ) ) ; addDependencyToFragment ( currentFragResponse ) ; break ; } if ( traceLog != null ) { traceLog . add ( VoltTrace :: endDuration ) ; } } currentFragResponse . setDrBufferSize ( 1 ) ; return currentFragResponse ; }
modified to work in the new world
32,112
// Periodic cleanup task using the run-everywhere pattern: fetches the partition key
// set via @GetPartitionKeys("INTEGER"), invokes DeleteOldAdRequests once per
// partition with that partition's key and m_expiredAgeInSeconds, then fires a single
// DeleteExpiredBids. All proc calls use NullCallback, so responses are discarded;
// IOException/ProcCallException only surface as a printed stack trace (best-effort
// cleanup by design).
public void run ( ) { try { VoltTable partitionKeys = null ; partitionKeys = m_client . callProcedure ( "@GetPartitionKeys" , "INTEGER" ) . getResults ( ) [ 0 ] ; while ( partitionKeys . advanceRow ( ) ) { m_client . callProcedure ( new NullCallback ( ) , "DeleteOldAdRequests" , partitionKeys . getLong ( "PARTITION_KEY" ) , m_expiredAgeInSeconds ) ; } m_client . callProcedure ( new NullCallback ( ) , "DeleteExpiredBids" ) ; } catch ( IOException | ProcCallException ex ) { ex . printStackTrace ( ) ; } }
Remove aged - out data from the ad_requests table . This table is partitioned and may be large so use the run - everywhere pattern to minimize impact to throughput .
32,113
// Settles per-transaction LOB bookkeeping; no-op unless hasLobOps was set (and
// always clears that flag). On commit: for each lob created this transaction, a net
// usage delta of exactly 1 means it is simply forgotten; otherwise, outside a batch,
// the LobManager count is adjusted by delta-1 -- both cases remove the entry from
// createdLobs in place (note the manual i-- after remove(i) to keep the index
// scan correct). Remaining deltas for pre-existing lobs are then applied and the
// map cleared. On rollback: every lob created this transaction is deleted and both
// structures are cleared.
public void updateLobUsage ( boolean commit ) { if ( ! hasLobOps ) { return ; } hasLobOps = false ; if ( commit ) { for ( int i = 0 ; i < createdLobs . size ( ) ; i ++ ) { long lobID = createdLobs . get ( i ) ; int delta = lobUsageCount . get ( lobID , 0 ) ; if ( delta == 1 ) { lobUsageCount . remove ( lobID ) ; createdLobs . remove ( i ) ; i -- ; } else if ( ! session . isBatch ) { database . lobManager . adjustUsageCount ( lobID , delta - 1 ) ; lobUsageCount . remove ( lobID ) ; createdLobs . remove ( i ) ; i -- ; } } if ( ! lobUsageCount . isEmpty ( ) ) { Iterator it = lobUsageCount . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { long lobID = it . nextLong ( ) ; int delta = lobUsageCount . get ( lobID ) ; database . lobManager . adjustUsageCount ( lobID , delta - 1 ) ; } lobUsageCount . clear ( ) ; } return ; } else { for ( int i = 0 ; i < createdLobs . size ( ) ; i ++ ) { long lobID = createdLobs . get ( i ) ; database . lobManager . deleteLob ( lobID ) ; } createdLobs . clear ( ) ; lobUsageCount . clear ( ) ; return ; } }
update LobManager user counts delete lobs that have no usage
32,114
// Allocates storage for a new LOB arriving in a ResultLob message (bytes or chars,
// per the result sub-type). When no external inputStream is supplied, the lob keeps
// the transported id and the stream (or, for chars, a Reader wrapped as a stream)
// embedded in the result is used; otherwise a fresh blob/clob is created via the
// session and the transported id is mapped to the new id in resultLobs. The data
// stream is wrapped in a CountdownInputStream bounded by the block length -- for
// clobs the bound is clobLength * 2, presumably 2 bytes per char; confirm encoding.
// Sub-types other than REQUEST_CREATE_BYTES/REQUEST_CREATE_CHARS are ignored.
public void allocateLobForResult ( ResultLob result , InputStream inputStream ) { long resultLobId = result . getLobID ( ) ; CountdownInputStream countStream ; switch ( result . getSubType ( ) ) { case ResultLob . LobResultTypes . REQUEST_CREATE_BYTES : { long blobId ; long blobLength = result . getBlockLength ( ) ; if ( inputStream == null ) { blobId = resultLobId ; inputStream = result . getInputStream ( ) ; } else { BlobData blob = session . createBlob ( blobLength ) ; blobId = blob . getId ( ) ; resultLobs . put ( resultLobId , blobId ) ; } countStream = new CountdownInputStream ( inputStream ) ; countStream . setCount ( blobLength ) ; database . lobManager . setBytesForNewBlob ( blobId , countStream , result . getBlockLength ( ) ) ; break ; } case ResultLob . LobResultTypes . REQUEST_CREATE_CHARS : { long clobId ; long clobLength = result . getBlockLength ( ) ; if ( inputStream == null ) { clobId = resultLobId ; if ( result . getReader ( ) != null ) { inputStream = new ReaderInputStream ( result . getReader ( ) ) ; } else { inputStream = result . getInputStream ( ) ; } } else { ClobData clob = session . createClob ( clobLength ) ; clobId = clob . getId ( ) ; resultLobs . put ( resultLobId , clobId ) ; } countStream = new CountdownInputStream ( inputStream ) ; countStream . setCount ( clobLength * 2 ) ; database . lobManager . setCharsForNewClob ( clobId , countStream , result . getBlockLength ( ) ) ; break ; } } }
allocate storage for a new LOB
32,115
// Resolves column ordering and TVE indexes for this join node's output schema.
// Asserts exactly two children and no inline INDEXSCAN, recurses into both children
// first, then resolves the pre-join/join/where predicates and subquery columns
// against the pair of child schemas. Each pre-inline-aggregate output column must be
// a TupleValueExpression; columns positioned below the outer schema's size resolve
// against the outer schema, the rest against the inner schema with the index offset
// by the outer size. A failed lookup (-1) is a fatal planner error. Both halves of
// the schema are then re-sorted by TVE index before resolving the real output
// schema, and m_hasSignificantOutputSchema is flagged.
public void resolveColumnIndexes ( ) { IndexScanPlanNode index_scan = ( IndexScanPlanNode ) getInlinePlanNode ( PlanNodeType . INDEXSCAN ) ; assert ( m_children . size ( ) == 2 && index_scan == null ) ; for ( AbstractPlanNode child : m_children ) { child . resolveColumnIndexes ( ) ; } final NodeSchema outer_schema = m_children . get ( 0 ) . getOutputSchema ( ) ; final NodeSchema inner_schema = m_children . get ( 1 ) . getOutputSchema ( ) ; final int outerSize = outer_schema . size ( ) ; final int innerSize = inner_schema . size ( ) ; resolvePredicate ( m_preJoinPredicate , outer_schema , inner_schema ) ; resolvePredicate ( m_joinPredicate , outer_schema , inner_schema ) ; resolvePredicate ( m_wherePredicate , outer_schema , inner_schema ) ; resolveSubqueryColumnIndexes ( ) ; for ( int i = 0 ; i < m_outputSchemaPreInlineAgg . size ( ) ; ++ i ) { SchemaColumn col = m_outputSchemaPreInlineAgg . getColumn ( i ) ; assert ( col . getExpression ( ) instanceof TupleValueExpression ) ; TupleValueExpression tve = ( TupleValueExpression ) col . getExpression ( ) ; int index ; if ( i < outerSize ) { index = tve . setColumnIndexUsingSchema ( outer_schema ) ; } else { index = tve . setColumnIndexUsingSchema ( inner_schema ) ; index += outerSize ; } if ( index == - 1 ) { throw new RuntimeException ( "Unable to find index for column: " + col . toString ( ) ) ; } tve . setColumnIndex ( index ) ; tve . setDifferentiator ( index ) ; } m_outputSchemaPreInlineAgg . sortByTveIndex ( 0 , outer_schema . size ( ) ) ; m_outputSchemaPreInlineAgg . sortByTveIndex ( outer_schema . size ( ) , m_outputSchemaPreInlineAgg . size ( ) ) ; m_hasSignificantOutputSchema = true ; resolveRealOutputSchema ( ) ; }
order and TVE indexes for the output SchemaColumns .
32,116
/**
 * Resolves this join's sort direction from its outer (first) child only.
 * FULL joins never inherit an order and are marked INVALID; otherwise, when
 * the outer child is index-sortable, its index-scan sort order is adopted.
 * Any other case leaves m_sortDirection untouched.
 */
public void resolveSortDirection() {
    // Fetch the outer child first, matching the original evaluation order.
    AbstractPlanNode outerChild = m_children.get(0);
    if (m_joinType == JoinType.FULL) {
        m_sortDirection = SortDirectionType.INVALID;
    } else if (outerChild instanceof IndexSortablePlanNode) {
        m_sortDirection = ((IndexSortablePlanNode) outerChild).indexUse().getSortOrderFromIndexScan();
    }
}
right now only consider the sort direction on the outer table
32,117
/**
 * Discounts a join child's estimated processed-tuple count based on how many
 * post-filters it carries. Equality filters earn a larger discount than other
 * filter types; each additional filter of a kind contributes a geometrically
 * smaller amount. Children that are neither scans nor nest-loop variants, or
 * that carry no predicate, are returned undiscounted.
 *
 * @param childNode the join child whose estimate is being discounted
 * @return the (possibly reduced) estimated processed tuple count
 */
protected long discountEstimatedProcessedTupleCount(AbstractPlanNode childNode) {
    final AbstractExpression predicate;
    if (childNode instanceof AbstractScanPlanNode) {
        predicate = ((AbstractScanPlanNode) childNode).getPredicate();
    } else if (childNode instanceof NestLoopPlanNode) {
        predicate = ((NestLoopPlanNode) childNode).getWherePredicate();
    } else if (childNode instanceof NestLoopIndexPlanNode) {
        AbstractPlanNode inlineIndexScan =
                ((NestLoopIndexPlanNode) childNode).getInlinePlanNode(PlanNodeType.INDEXSCAN);
        assert (inlineIndexScan != null);
        predicate = ((AbstractScanPlanNode) inlineIndexScan).getPredicate();
    } else {
        // Unknown node kind: nothing to discount.
        return childNode.getEstimatedProcessedTupleCount();
    }
    if (predicate == null) {
        return childNode.getEstimatedProcessedTupleCount();
    }
    final double MAX_EQ_POST_FILTER_DISCOUNT = 0.09;
    final double MAX_OTHER_POST_FILTER_DISCOUNT = 0.045;
    double discountFactor = 1.0;
    int eqSeen = 0;
    int otherSeen = 0;
    // Each successive filter of a kind contributes base^k -- diminishing returns.
    for (AbstractExpression filter : ExpressionUtil.uncombinePredicate(predicate)) {
        if (ExpressionType.COMPARE_EQUAL == filter.getExpressionType()) {
            discountFactor -= Math.pow(MAX_EQ_POST_FILTER_DISCOUNT, ++eqSeen);
        } else {
            discountFactor -= Math.pow(MAX_OTHER_POST_FILTER_DISCOUNT, ++otherSeen);
        }
    }
    return (long) (childNode.getEstimatedProcessedTupleCount() * discountFactor);
}
Discount join node child estimates based on the number of its filters
32,118
// Deserializes this value's backing byte array into its Java object form. Called by
// the JDBC layer and, inside the engine, when converting an OTHER-typed value to
// another type. Any failure (including the serialized class being unavailable) is
// rethrown as an X_22521 engine error carrying the cause's string form.
public Serializable getObject ( ) { try { return InOutUtil . deserialize ( data ) ; } catch ( Exception e ) { throw Error . error ( ErrorCode . X_22521 , e . toString ( ) ) ; } }
This method is called from classes implementing the JDBC interfaces . Inside the engine it is used for conversion from a value of type OTHER to another type . It will throw if the OTHER is an instance of a class that is not available .
32,119
/**
 * Converts a char-to-string replacement map into a dense lookup table indexed
 * by character value. Slots for unmapped characters are left null; an empty
 * map yields the shared empty table.
 *
 * @param map replacement map; must not be null
 * @return a table where table[c] is the replacement for character c, or null
 */
static char[][] createReplacementArray(Map<Character, String> map) {
    checkNotNull(map);  // GWT-compatible null check
    if (map.isEmpty()) {
        return EMPTY_REPLACEMENT_ARRAY;
    }
    // Size the table to the highest mapped character so it can be indexed directly.
    char highest = Collections.max(map.keySet());
    char[][] table = new char[highest + 1][];
    for (Map.Entry<Character, String> entry : map.entrySet()) {
        table[entry.getKey()] = entry.getValue().toCharArray();
    }
    return table;
}
original character value .
32,120
/**
 * Retrieves the designated parameter's column size. Datetime types report
 * their display size; for other types the declared precision is returned,
 * clamped to 0 when it exceeds Integer.MAX_VALUE.
 *
 * @param param parameter index, 1-based
 * @return the column size, or 0 for oversized precision
 * @throws SQLException if the index is out of range
 */
public int getPrecision(int param) throws SQLException {
    checkRange(param);
    Type type = rmd.columnTypes[param - 1];
    if (type.isDateTimeType()) {
        return type.displaySize();
    }
    long precision = type.precision;
    if (precision > Integer.MAX_VALUE) {
        precision = 0;  // cannot represent -- report as unknown
    }
    return (int) precision;
}
Retrieves the designated parameter's specified column size .
32,121
/**
 * Retrieves the designated parameter's database-specific type name.
 *
 * @param param parameter index, 1-based
 * @return the engine's name for the parameter's type
 * @throws SQLException if the index is out of range
 */
public String getParameterTypeName(int param) throws SQLException {
    checkRange(param);
    Type parameterType = rmd.columnTypes[param - 1];
    return parameterType.getNameString();
}
Retrieves the designated parameter's database - specific type name .
32,122
/**
 * Instantiates the pro-edition task log for a joining node, reflectively so
 * the community build compiles without the pro classes.
 *
 * @param voltroot VoltDB root directory for the overflow files
 * @param pid      partition id the log belongs to
 * @return the task log instance (via ProClass, which logs on failure)
 */
protected static TaskLog initializeTaskLog(String voltroot, int pid) {
    // Overflow spills live under <voltroot>/join_overflow.
    final File overflowDir = new File(voltroot, "join_overflow");
    return ProClass.newInstanceOf("org.voltdb.rejoin.TaskLogImpl", "Join",
            ProClass.HANDLER_LOG, pid, overflowDir);
}
Load the pro task log
32,123
// Handles one received rejoin/restore data block: pets the watchdog first so the
// transfer is not flagged as stalled, then hands the block to the site connection
// for application.
protected void restoreBlock ( RestoreWork rejoinWork , SiteProcedureConnection siteConnection ) { kickWatchdog ( true ) ; rejoinWork . restore ( siteConnection ) ; }
Received a datablock . Reset the watchdog timer and hand the block to the Site .
32,124
// Re-enqueues this task immediately when its sources are ready; otherwise schedules
// a ReturnToTaskQueueAction 1 ms out to retry (interval -1 -- presumably meaning
// one-shot; confirm against VoltDB.scheduleWork semantics).
protected void returnToTaskQueue ( boolean sourcesReady ) { if ( sourcesReady ) { m_taskQueue . offer ( this ) ; } else { VoltDB . instance ( ) . scheduleWork ( new ReturnToTaskQueueAction ( ) , 1 , - 1 , TimeUnit . MILLISECONDS ) ; } }
or after waiting a few milliseconds
32,125
/**
 * Writes a long to the buffer in ZigZag-encoded LEB128-64b9B form: 7 data
 * bits per byte with the high bit as a continuation flag, except that the
 * ninth byte (when present) carries a full 8 bits, capping the encoding at
 * 9 bytes instead of the canonical varint's 10.
 *
 * @param buffer destination buffer (position advances by 1..9 bytes)
 * @param value  signed value to encode
 */
static void putLong(ByteBuffer buffer, long value) {
    // ZigZag: interleave sign so small magnitudes of either sign stay short.
    value = (value << 1) ^ (value >> 63);
    for (int shift = 0; ; shift += 7) {
        // Terminal byte: either the 9th (full 8 bits) or no higher bits remain.
        if (shift == 56 || value >>> (shift + 7) == 0) {
            buffer.put((byte) (value >>> shift));
            return;
        }
        buffer.put((byte) (value >>> shift | 0x80));
    }
}
Writes a long value to the given buffer in LEB128 ZigZag encoded format
32,126
/**
 * Writes an int to the buffer in ZigZag-encoded LEB128-64b9B form: 7 data
 * bits per byte with a continuation flag, except that the fifth byte (when
 * present) carries the remaining 4 bits with no flag, capping the encoding
 * at 5 bytes.
 *
 * @param buffer destination buffer (position advances by 1..5 bytes)
 * @param value  signed value to encode
 */
static void putInt(ByteBuffer buffer, int value) {
    // ZigZag: interleave sign so small magnitudes of either sign stay short.
    value = (value << 1) ^ (value >> 31);
    for (int shift = 0; ; shift += 7) {
        // Terminal byte: either the 5th or no higher bits remain.
        if (shift == 28 || value >>> (shift + 7) == 0) {
            buffer.put((byte) (value >>> shift));
            return;
        }
        buffer.put((byte) (value >>> shift | 0x80));
    }
}
Writes an int value to the given buffer in LEB128 - 64b9B ZigZag encoded format
32,127
/**
 * Reads a ZigZag-encoded LEB128-64b9B long from the buffer: up to eight
 * 7-bit groups with continuation flags, then an optional ninth byte holding
 * a full 8 bits (mirroring putLong's 9-byte cap).
 *
 * @param buffer source buffer (position advances by 1..9 bytes)
 * @return the decoded signed value
 */
static long getLong(ByteBuffer buffer) {
    long encoded = 0;
    for (int shift = 0; ; shift += 7) {
        long group = buffer.get();
        if (shift == 56) {
            // Ninth byte: all 8 bits are data, no continuation flag.
            encoded |= group << 56;
            break;
        }
        encoded |= (group & 0x7F) << shift;
        if ((group & 0x80) == 0) {
            break;
        }
    }
    // Undo ZigZag: restore the interleaved sign.
    return (encoded >>> 1) ^ (-(encoded & 1));
}
Read an LEB128 - 64b9B ZigZag encoded long value from the given buffer
32,128
/**
 * Reads a ZigZag-encoded LEB128-64b9B int from the buffer: up to four 7-bit
 * groups with continuation flags, then an optional fifth byte whose low bits
 * complete the value (mirroring putInt's 5-byte cap).
 *
 * @param buffer source buffer (position advances by 1..5 bytes)
 * @return the decoded signed value
 */
static int getInt(ByteBuffer buffer) {
    int encoded = 0;
    // The fifth group (shift 28) is always terminal; its flag is never read.
    for (int shift = 0; shift <= 28; shift += 7) {
        int group = buffer.get();
        encoded |= (group & 0x7F) << shift;
        if ((group & 0x80) == 0) {
            break;
        }
    }
    // Undo ZigZag: restore the interleaved sign.
    return (encoded >>> 1) ^ (-(encoded & 1));
}
Read an LEB128 - 64b9B ZigZag encoded int value from the given buffer
32,129
// CLI entry point. Argument 0 selects the mode: "t" (list) or "x" (extract). An
// optional "--directory=DIR" may appear as argument 1 to set the extraction target
// (rejected in list mode); the next argument names the tar file, and any remaining
// arguments are entry-matching patterns. With no arguments, prints syntax help and
// exits 0; malformed usage raises IllegalArgumentException with a localized message.
// NOTE(review): despite the caption mentioning stdin, this implementation always
// reads from the named file argument.
static public void main ( String [ ] sa ) throws IOException , TarMalformatException { if ( sa . length < 1 ) { System . out . println ( RB . singleton . getString ( RB . TARREADER_SYNTAX , TarReader . class . getName ( ) ) ) ; System . out . println ( RB . singleton . getString ( RB . LISTING_FORMAT ) ) ; System . exit ( 0 ) ; } File exDir = ( sa . length > 1 && sa [ 1 ] . startsWith ( "--directory=" ) ) ? ( new File ( sa [ 1 ] . substring ( "--directory=" . length ( ) ) ) ) : null ; int firstPatInd = ( exDir == null ) ? 2 : 3 ; if ( sa . length < firstPatInd || ( ( ! sa [ 0 ] . equals ( "t" ) ) && ! sa [ 0 ] . equals ( "x" ) ) ) { throw new IllegalArgumentException ( RB . singleton . getString ( RB . TARREADER_SYNTAXERR , TarReader . class . getName ( ) ) ) ; } String [ ] patternStrings = null ; if ( sa . length > firstPatInd ) { patternStrings = new String [ sa . length - firstPatInd ] ; for ( int i = firstPatInd ; i < sa . length ; i ++ ) { patternStrings [ i - firstPatInd ] = sa [ i ] ; } } if ( sa [ 0 ] . equals ( "t" ) && exDir != null ) { throw new IllegalArgumentException ( RB . singleton . getString ( RB . DIR_X_CONFLICT ) ) ; } int dirIndex = ( exDir == null ) ? 1 : 2 ; int tarReaderMode = sa [ 0 ] . equals ( "t" ) ? LIST_MODE : EXTRACT_MODE ; new TarReader ( new File ( sa [ dirIndex ] ) , tarReaderMode , patternStrings , null , exDir ) . read ( ) ; }
Reads a specified tar file or stdin in order to either list or extract the file tar entries depending on the first argument being t or x using default read buffer blocks .
32,130
/**
 * Extracts the creation time embedded in a unique id: the high bits above the
 * counter and partition-id fields hold milliseconds relative to VOLT_EPOCH.
 *
 * @param uniqueId a VoltDB-generated unique id
 * @return the creation time as a java.util.Date
 */
public static Date getDateFromUniqueId(long uniqueId) {
    final long millisSinceJavaEpoch =
            VOLT_EPOCH + (uniqueId >> (COUNTER_BITS + PARTITIONID_BITS));
    return new Date(millisSinceJavaEpoch);
}
Given a unique id return the time of its creation by examining the embedded timestamp .
32,131
/**
 * Creates an object from a class name using its no-argument constructor.
 *
 * @param classname fully qualified name of the class to instantiate
 * @return a new instance of the named class
 * @throws ParseException if the class cannot be found or instantiated
 */
public static Object createObject(String classname) throws ParseException {
    final Class<?> cl;
    try {
        cl = Class.forName(classname);
    } catch (ClassNotFoundException cnfe) {
        throw new ParseException("Unable to find the class: " + classname);
    }
    try {
        // Class.newInstance() is deprecated because it propagates checked
        // constructor exceptions unchecked; reflect through the declared
        // no-arg constructor instead. The broad catch below still converts
        // every failure (missing ctor, access, invocation) to ParseException.
        return cl.getDeclaredConstructor().newInstance();
    } catch (Exception e) {
        throw new ParseException(e.getClass().getName()
                + "; Unable to create an instance of: " + classname);
    }
}
Create an Object from the classname and empty constructor .
32,132
/**
 * Creates a Number from a string: a Double when a '.' is present, otherwise
 * a Long. Unparsable input is reported as a ParseException carrying the
 * NumberFormatException's message.
 *
 * @param str numeric text to parse
 * @return a Double or Long
 * @throws ParseException if the text is not a valid number of that form
 */
public static Number createNumber(String str) throws ParseException {
    try {
        return str.indexOf('.') == -1 ? Long.valueOf(str) : Double.valueOf(str);
    } catch (NumberFormatException e) {
        throw new ParseException(e.getMessage());
    }
}
Create a number from a String . If a . is present it creates a Double otherwise a Long .
32,133
// Validates and expands an ACL before it is applied, mutating the passed-in list in
// place. Returns true immediately when ACL checking is disabled (skipACL); false for
// a null/empty list. Per entry: "world/anyone" passes through untouched; an "auth"
// entry is removed and replaced by one ACL (same perms) per authenticated id from
// the requestor's authInfo -- if no id authenticates, the whole call fails; any
// other scheme must have a registered AuthenticationProvider whose isValid accepts
// the id. A provider missing for an authInfo id is only logged, but a provider
// missing for an explicit scheme fails the call. Returns true iff the (possibly
// expanded) list ends up non-empty.
private boolean fixupACL ( List < Id > authInfo , List < ACL > acl ) { if ( skipACL ) { return true ; } if ( acl == null || acl . size ( ) == 0 ) { return false ; } Iterator < ACL > it = acl . iterator ( ) ; LinkedList < ACL > toAdd = null ; while ( it . hasNext ( ) ) { ACL a = it . next ( ) ; Id id = a . getId ( ) ; if ( id . getScheme ( ) . equals ( "world" ) && id . getId ( ) . equals ( "anyone" ) ) { } else if ( id . getScheme ( ) . equals ( "auth" ) ) { it . remove ( ) ; if ( toAdd == null ) { toAdd = new LinkedList < ACL > ( ) ; } boolean authIdValid = false ; for ( Id cid : authInfo ) { AuthenticationProvider ap = ProviderRegistry . getProvider ( cid . getScheme ( ) ) ; if ( ap == null ) { LOG . error ( "Missing AuthenticationProvider for " + cid . getScheme ( ) ) ; } else if ( ap . isAuthenticated ( ) ) { authIdValid = true ; toAdd . add ( new ACL ( a . getPerms ( ) , cid ) ) ; } } if ( ! authIdValid ) { return false ; } } else { AuthenticationProvider ap = ProviderRegistry . getProvider ( id . getScheme ( ) ) ; if ( ap == null ) { return false ; } if ( ! ap . isValid ( id . getId ( ) ) ) { return false ; } } } if ( toAdd != null ) { for ( ACL a : toAdd ) { acl . add ( a ) ; } } return acl . size ( ) > 0 ; }
This method checks out the acl, making sure it isn't null or empty, that it has valid schemes and ids, and expanding any relative ids that depend on the requestor's authentication information .
32,134
/**
 * Performs the authentication request exactly once. Any exception thrown by
 * the implementation is captured in m_authenticationFailure rather than
 * propagated, and the request is marked done either way, so a second call
 * always fails fast.
 *
 * @param scheme      client authentication scheme to use
 * @param fromAddress address the request originated from
 * @return true when authentication succeeded
 * @throws IllegalStateException if this request already has a result
 */
public boolean authenticate(ClientAuthScheme scheme, String fromAddress) {
    if (m_done) throw new IllegalStateException("this authentication request has a result");
    boolean succeeded = false;
    try {
        succeeded = authenticateImpl(scheme, fromAddress);
    } catch (Exception failure) {
        // Record rather than propagate; callers inspect the stored failure.
        m_authenticationFailure = failure;
    } finally {
        m_done = true;
    }
    return succeeded;
}
Perform the authentication request
32,135
/**
 * Main entry point: records a market tick, refreshes the last-tick row,
 * recomputes the best bid/ask for the symbol, and inserts a new NBBO row
 * when this exchange holds either side of the best quote.
 *
 * @return ClientResponse.SUCCESS
 * @throws VoltAbortException on transaction abort
 */
public long run(String symbol, TimestampType time, long seq_number, String exchange,
        int bidPrice, int bidSize, int askPrice, int askSize) throws VoltAbortException {
    // BUG FIX: the original computed bidPriceSafe from askPrice (copy/paste),
    // which stored the ask value in the bid column and nulled the bid whenever
    // the ask was absent. Sanitize each side from its own raw price.
    Integer bidPriceSafe = bidPrice > 0 ? bidPrice : null;
    Integer askPriceSafe = askPrice > 0 ? askPrice : null;
    voltQueueSQL(insertTick, symbol, time, seq_number, exchange,
            bidPriceSafe, bidSize, askPriceSafe, askSize);
    voltQueueSQL(upsertLastTick, symbol, time, seq_number, exchange,
            bidPrice, bidSize, askPrice, askSize);
    voltQueueSQL(selectMaxBid, symbol);
    voltQueueSQL(selectMinAsk, symbol);
    VoltTable results0[] = voltExecuteSQL();
    // Best bid across exchanges.
    VoltTable tb = results0[2];
    tb.advanceRow();
    String bex = tb.getString(0);
    long bid = tb.getLong(1);
    long bsize = tb.getLong(2);
    // Best ask across exchanges.
    VoltTable ta = results0[3];
    ta.advanceRow();
    String aex = ta.getString(0);
    long ask = ta.getLong(1);
    long asize = ta.getLong(2);
    // Only record a new NBBO when this tick's exchange owns a side of it.
    if (bex.equals(exchange) || aex.equals(exchange)) {
        voltQueueSQL(insertNBBO, symbol, time, seq_number, bid, bsize, bex, ask, asize, aex);
        voltExecuteSQL(true);
    }
    return ClientResponse.SUCCESS;
}
main method the procedure starts here .
32,136
/**
 * Returns a new builder for an immutable range map.
 *
 * @param <K> the comparable key type
 * @param <V> the value type
 * @return an empty Builder
 */
public static <K extends Comparable<?>, V> Builder<K, V> builder() {
    // Diamond inference resolves K and V from the declared return type.
    return new Builder<>();
}
Returns a new builder for an immutable range map .
32,137
/**
 * Records whether this replica is the partition leader. On promotion to
 * leader, the SP log is truncated up to its newest entry's handle (leaders
 * and followers log differently, so stale follower entries are dropped).
 *
 * @param isLeader true when this replica becomes the leader
 */
void setLeaderState(boolean isLeader) {
    m_isLeader = isLeader;
    if (isLeader && !m_logSP.isEmpty()) {
        truncate(m_logSP.getLast().getHandle(), IS_SP);
    }
}
leaders log differently
32,138
// Appends replicated commands to the repair log and applies any truncation hints
// they carry. Dispatch by message type:
//  - Iv2InitiateTaskMessage (SP): logged only on non-leader replicas; read-only work
//    and @MigratePartitionLeader are skipped (the latter after advancing
//    m_lastSpHandle and truncating on the message's hint).
//  - FragmentTaskMessage (MP): the MP handle advances once per txn (Long.MAX_VALUE
//    treated as "unset"); read-only fragments are skipped; only the first fragment
//    of a txn is logged, which also advances m_lastSpHandle.
//  - CompleteTransactionMessage: advances the MP handle; skipped when read-only,
//    restarted, or aborted-during-repair; otherwise truncated on its hint, logged,
//    and m_lastSpHandle advanced.
//  - DumpMessage: warn-logs this site's handles and full log contents for debugging.
//  - DummyTransactionTaskMessage: only advances m_lastSpHandle (monotonically).
//  - RepairLogTruncationMessage: explicit SP-log truncation to the carried handle.
public void deliver ( VoltMessage msg ) { if ( ! m_isLeader && msg instanceof Iv2InitiateTaskMessage ) { final Iv2InitiateTaskMessage m = ( Iv2InitiateTaskMessage ) msg ; if ( m . isReadOnly ( ) ) { return ; } m_lastSpHandle = m . getSpHandle ( ) ; truncate ( m . getTruncationHandle ( ) , IS_SP ) ; if ( "@MigratePartitionLeader" . equalsIgnoreCase ( m . getStoredProcedureName ( ) ) ) { return ; } m_logSP . add ( new Item ( IS_SP , m , m . getSpHandle ( ) , m . getTxnId ( ) ) ) ; } else if ( msg instanceof FragmentTaskMessage ) { boolean newMp = false ; final FragmentTaskMessage m = ( FragmentTaskMessage ) msg ; if ( m . getTxnId ( ) > m_lastMpHandle || m_lastMpHandle == Long . MAX_VALUE ) { m_lastMpHandle = m . getTxnId ( ) ; newMp = true ; } if ( m . isReadOnly ( ) ) { return ; } truncate ( m . getTruncationHandle ( ) , IS_MP ) ; if ( newMp ) { m_logMP . add ( new Item ( IS_MP , m , m . getSpHandle ( ) , m . getTxnId ( ) ) ) ; m_lastSpHandle = m . getSpHandle ( ) ; } } else if ( msg instanceof CompleteTransactionMessage ) { CompleteTransactionMessage ctm = ( CompleteTransactionMessage ) msg ; m_lastMpHandle = Math . max ( m_lastMpHandle , ctm . getTxnId ( ) ) ; if ( ctm . isReadOnly ( ) || ctm . isRestart ( ) || ctm . isAbortDuringRepair ( ) ) { return ; } truncate ( ctm . getTruncationHandle ( ) , IS_MP ) ; m_logMP . add ( new Item ( IS_MP , ctm , ctm . getSpHandle ( ) , ctm . getTxnId ( ) ) ) ; m_lastSpHandle = ctm . getSpHandle ( ) ; } else if ( msg instanceof DumpMessage ) { String who = CoreUtils . hsIdToString ( m_HSId ) ; repairLogger . warn ( "Repair log dump for site: " + who + ", isLeader: " + m_isLeader + ", " + who + ": lastSpHandle: " + m_lastSpHandle + ", lastMpHandle: " + m_lastMpHandle ) ; for ( Iv2RepairLogResponseMessage il : contents ( 0l , false ) ) { repairLogger . warn ( "[Repair log contents]" + who + ": msg: " + il ) ; } } else if ( msg instanceof DummyTransactionTaskMessage ) { m_lastSpHandle = Math . 
max ( m_lastSpHandle , ( ( DummyTransactionTaskMessage ) msg ) . getSpHandle ( ) ) ; } else if ( msg instanceof RepairLogTruncationMessage ) { final RepairLogTruncationMessage truncateMsg = ( RepairLogTruncationMessage ) msg ; truncate ( truncateMsg . getHandle ( ) , IS_SP ) ; } }
the repairLog if the message includes a truncation hint .
32,139
private void truncate ( long handle , boolean isSP ) { if ( handle == Long . MIN_VALUE ) { return ; } Deque < RepairLog . Item > deq = null ; if ( isSP ) { deq = m_logSP ; if ( m_truncationHandle < handle ) { m_truncationHandle = handle ; notifyTxnCommitInterests ( handle ) ; } } else { deq = m_logMP ; } RepairLog . Item item = null ; while ( ( item = deq . peek ( ) ) != null ) { if ( item . canTruncate ( handle ) ) { deq . poll ( ) ; } else { break ; } } }
trim unnecessary log messages .
32,140
public List < Iv2RepairLogResponseMessage > contents ( long requestId , boolean forMPI ) { List < Item > items = new LinkedList < Item > ( ) ; items . addAll ( m_logMP ) ; if ( ! forMPI ) { items . addAll ( m_logSP ) ; } Collections . sort ( items , m_handleComparator ) ; int ofTotal = items . size ( ) + 1 ; if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( "Responding with " + ofTotal + " repair log parts." ) ; } List < Iv2RepairLogResponseMessage > responses = new LinkedList < Iv2RepairLogResponseMessage > ( ) ; Iv2RepairLogResponseMessage hheader = new Iv2RepairLogResponseMessage ( requestId , ofTotal , m_lastSpHandle , m_lastMpHandle , TheHashinator . getCurrentVersionedConfigCooked ( ) ) ; responses . add ( hheader ) ; int seq = responses . size ( ) ; Iterator < Item > itemator = items . iterator ( ) ; while ( itemator . hasNext ( ) ) { Item item = itemator . next ( ) ; Iv2RepairLogResponseMessage response = new Iv2RepairLogResponseMessage ( requestId , seq ++ , ofTotal , item . getHandle ( ) , item . getTxnId ( ) , item . getMessage ( ) ) ; responses . add ( response ) ; } return responses ; }
produce the contents of the repair log .
32,141
/**
 * Called after a procedure finishes executing. Always updates the
 * invocation/abort/failure counters; for sampled invocations also records
 * execution time, result size, and parameter-set size (total/min/max plus
 * incremental min/max), then rolls up per-statement stats if present.
 *
 * @param aborted    true if the procedure aborted
 * @param failed     true if the procedure failed
 * @param statsToken per-call token with start time, sizes, and statement stats
 */
public final synchronized void endProcedure(boolean aborted, boolean failed, SingleCallStatsToken statsToken) {
    if (aborted) {
        m_procStatsData.m_abortCount++;
    }
    if (failed) {
        m_procStatsData.m_failureCount++;
    }
    m_procStatsData.m_invocations++;
    // Detailed timing/size stats are only kept for sampled invocations.
    if (!statsToken.samplingProcedure()) {
        return;
    }
    final long endTime = System.nanoTime();
    final long duration = endTime - statsToken.startTimeNanos;
    if (duration < 0) {
        // Negative elapsed time: only log when egregious (> 1 second), then skip.
        if (Math.abs(duration) > 1000000000) {
            log.info("Procedure: " + m_procName +
                     " recorded a negative execution time larger than one second: " +
                     duration);
        }
        return;
    }
    m_procStatsData.m_timedInvocations++;
    m_procStatsData.m_totalTimedExecutionTime += duration;
    m_procStatsData.m_minExecutionTime = Math.min(duration, m_procStatsData.m_minExecutionTime);
    m_procStatsData.m_maxExecutionTime = Math.max(duration, m_procStatsData.m_maxExecutionTime);
    // "incr" fields track min/max since the last stats poll.
    m_procStatsData.m_incrMinExecutionTime = Math.min(duration, m_procStatsData.m_incrMinExecutionTime);
    m_procStatsData.m_incrMaxExecutionTime = Math.max(duration, m_procStatsData.m_incrMaxExecutionTime);
    m_procStatsData.m_totalResultSize += statsToken.resultSize;
    m_procStatsData.m_minResultSize = Math.min(statsToken.resultSize, m_procStatsData.m_minResultSize);
    m_procStatsData.m_maxResultSize = Math.max(statsToken.resultSize, m_procStatsData.m_maxResultSize);
    m_procStatsData.m_incrMinResultSize = Math.min(statsToken.resultSize, m_procStatsData.m_incrMinResultSize);
    m_procStatsData.m_incrMaxResultSize = Math.max(statsToken.resultSize, m_procStatsData.m_incrMaxResultSize);
    m_procStatsData.m_totalParameterSetSize += statsToken.parameterSetSize;
    m_procStatsData.m_minParameterSetSize = Math.min(statsToken.parameterSetSize, m_procStatsData.m_minParameterSetSize);
    m_procStatsData.m_maxParameterSetSize = Math.max(statsToken.parameterSetSize, m_procStatsData.m_maxParameterSetSize);
    m_procStatsData.m_incrMinParameterSetSize = Math.min(statsToken.parameterSetSize, m_procStatsData.m_incrMinParameterSetSize);
    m_procStatsData.m_incrMaxParameterSetSize = Math.max(statsToken.parameterSetSize, m_procStatsData.m_incrMaxParameterSetSize);
    // Roll up per-statement stats, when any were collected for this call.
    if (statsToken.stmtStats == null) {
        return;
    }
    for (SingleCallStatsToken.PerStmtStats pss : statsToken.stmtStats) {
        long stmtDuration = 0;
        int stmtResultSize = 0;
        int stmtParameterSetSize = 0;
        // measurements == null means this statement was not sampled.
        if (pss.measurements != null) {
            stmtDuration = pss.measurements.stmtDuration;
            stmtResultSize = pss.measurements.stmtResultSize;
            stmtParameterSetSize = pss.measurements.stmtParameterSetSize;
        }
        endFragment(pss.stmtName, pss.isCoordinatorTask, pss.stmtFailed,
                    pss.measurements != null, stmtDuration, stmtResultSize,
                    stmtParameterSetSize);
    }
}
Called after a procedure is finished executing . Compares the start and end time and calculates the statistics .
32,142
/**
 * Called after a statement (fragment) finishes running. Updates the statement's
 * failure/invocation counters and, for sampled statements, its timing and size
 * statistics on either the coordinator-task or worker-task data set.
 *
 * @param stmtName          statement name; null is silently ignored
 * @param isCoordinatorTask selects coordinator vs. worker stats bucket
 * @param failed            true if the statement failed
 * @param sampledStmt       true if timing/size measurements were collected
 * @param duration          elapsed nanoseconds (only used when sampled)
 * @param resultSize        result size in bytes (only used when sampled)
 * @param parameterSetSize  parameter set size in bytes (only used when sampled)
 */
public final synchronized void endFragment(String stmtName, boolean isCoordinatorTask, boolean failed, boolean sampledStmt, long duration, int resultSize, int parameterSetSize) {
    if (stmtName == null) {
        return;
    }
    // Unknown statements (not in the map) are ignored.
    StatementStats stmtStats = m_stmtStatsMap.get(stmtName);
    if (stmtStats == null) {
        return;
    }
    StatsData dataToUpdate = isCoordinatorTask ? stmtStats.m_coordinatorTask : stmtStats.m_workerTask;
    if (failed) {
        dataToUpdate.m_failureCount++;
    }
    dataToUpdate.m_invocations++;
    // Timing/size stats only apply to sampled statements.
    if (!sampledStmt) {
        return;
    }
    if (duration < 0) {
        // Negative elapsed time: only log when egregious (> 1 second), then skip.
        if (Math.abs(duration) > 1000000000) {
            log.info("Statement: " + stmtStats.m_stmtName + " in procedure: " + m_procName +
                     " recorded a negative execution time larger than one second: " + duration);
        }
        return;
    }
    dataToUpdate.m_timedInvocations++;
    dataToUpdate.m_totalTimedExecutionTime += duration;
    dataToUpdate.m_minExecutionTime = Math.min(duration, dataToUpdate.m_minExecutionTime);
    dataToUpdate.m_maxExecutionTime = Math.max(duration, dataToUpdate.m_maxExecutionTime);
    // "incr" fields track min/max since the last stats poll.
    dataToUpdate.m_incrMinExecutionTime = Math.min(duration, dataToUpdate.m_incrMinExecutionTime);
    dataToUpdate.m_incrMaxExecutionTime = Math.max(duration, dataToUpdate.m_incrMaxExecutionTime);
    dataToUpdate.m_totalResultSize += resultSize;
    dataToUpdate.m_minResultSize = Math.min(resultSize, dataToUpdate.m_minResultSize);
    dataToUpdate.m_maxResultSize = Math.max(resultSize, dataToUpdate.m_maxResultSize);
    dataToUpdate.m_incrMinResultSize = Math.min(resultSize, dataToUpdate.m_incrMinResultSize);
    dataToUpdate.m_incrMaxResultSize = Math.max(resultSize, dataToUpdate.m_incrMaxResultSize);
    dataToUpdate.m_totalParameterSetSize += parameterSetSize;
    dataToUpdate.m_minParameterSetSize = Math.min(parameterSetSize, dataToUpdate.m_minParameterSetSize);
    dataToUpdate.m_maxParameterSetSize = Math.max(parameterSetSize, dataToUpdate.m_maxParameterSetSize);
    dataToUpdate.m_incrMinParameterSetSize = Math.min(parameterSetSize, dataToUpdate.m_incrMinParameterSetSize);
    dataToUpdate.m_incrMaxParameterSetSize = Math.max(parameterSetSize, dataToUpdate.m_incrMaxParameterSetSize);
}
This function will be called after a statement finish running . It updates the data structures to maintain the statistics .
32,143
public synchronized Session newSession ( Database db , User user , boolean readonly , boolean forLog , int timeZoneSeconds ) { Session s = new Session ( db , user , ! forLog , ! forLog , readonly , sessionIdCount , timeZoneSeconds ) ; s . isProcessingLog = forLog ; sessionMap . put ( sessionIdCount , s ) ; sessionIdCount ++ ; return s ; }
Binds the specified Session object into this SessionManager's active Session registry . This method is typically called internally as the final step when a successful connection has been made .
32,144
public Session getSysSessionForScript ( Database db ) { Session session = new Session ( db , db . getUserManager ( ) . getSysUser ( ) , false , false , false , 0 , 0 ) ; session . isProcessingScript = true ; return session ; }
Retrieves a new SYS Session .
32,145
public synchronized void closeAllSessions ( ) { Session [ ] sessions = getAllSessions ( ) ; for ( int i = 0 ; i < sessions . length ; i ++ ) { sessions [ i ] . close ( ) ; } }
Closes all Sessions registered with this SessionManager .
32,146
public HostAndPort withDefaultPort ( int defaultPort ) { checkArgument ( isValidPort ( defaultPort ) ) ; if ( hasPort ( ) || port == defaultPort ) { return this ; } return new HostAndPort ( host , defaultPort , hasBracketlessColons ) ; }
Provide a default port if the parsed string contained only a host .
32,147
public Runnable writeCatalogJarToFile ( String path , String name , CatalogJarWriteMode mode ) throws IOException { File catalogFile = new VoltFile ( path , name ) ; File catalogTmpFile = new VoltFile ( path , name + ".tmp" ) ; if ( mode == CatalogJarWriteMode . CATALOG_UPDATE ) { catalogFile . delete ( ) ; catalogTmpFile . renameTo ( catalogFile ) ; return null ; } if ( mode == CatalogJarWriteMode . START_OR_RESTART ) { return m_catalogInfo . m_jarfile . writeToFile ( catalogFile ) ; } if ( mode == CatalogJarWriteMode . RECOVER ) { catalogFile . delete ( ) ; if ( catalogTmpFile . exists ( ) ) { catalogTmpFile . delete ( ) ; } return m_catalogInfo . m_jarfile . writeToFile ( catalogFile ) ; } VoltDB . crashLocalVoltDB ( "Unsupported mode to write catalog jar" , true , null ) ; return null ; }
Write replace or update the catalog jar based on different cases . This function assumes any IOException should lead to fatal crash .
32,148
public Class < ? > classForProcedureOrUDF ( String procedureClassName ) throws LinkageError , ExceptionInInitializerError , ClassNotFoundException { return classForProcedureOrUDF ( procedureClassName , m_catalogInfo . m_jarfile . getLoader ( ) ) ; }
Given a class name in the catalog jar loads it from the jar even if the jar is served from an URL and isn t in the classpath .
32,149
public DeploymentType getDeployment ( ) { if ( m_memoizedDeployment == null ) { m_memoizedDeployment = CatalogUtil . getDeployment ( new ByteArrayInputStream ( m_catalogInfo . m_deploymentBytes ) ) ; if ( m_memoizedDeployment == null ) { VoltDB . crashLocalVoltDB ( "The internal deployment bytes are invalid. This should never occur; please contact VoltDB support with your logfiles." ) ; } } return m_memoizedDeployment ; }
Get the JAXB XML Deployment object which is memoized
32,150
public boolean removeAfter ( Node node ) { if ( node == null || node . next == null ) { return false ; } if ( node . next == last ) { last = node ; } node . next = node . next . next ; return true ; }
Removes the node after the given node , to allow removal while iterating .
32,151
protected ProcedurePartitionData parseCreateProcedureClauses ( ProcedureDescriptor descriptor , String clauses ) throws VoltCompilerException { if ( clauses == null || clauses . isEmpty ( ) ) { return null ; } ProcedurePartitionData data = null ; Matcher matcher = SQLParser . matchAnyCreateProcedureStatementClause ( clauses ) ; int start = 0 ; while ( matcher . find ( start ) ) { start = matcher . end ( ) ; if ( matcher . group ( 1 ) != null ) { for ( String roleName : StringUtils . split ( matcher . group ( 1 ) , ',' ) ) { String roleNameFixed = roleName . trim ( ) . toLowerCase ( ) ; if ( ! descriptor . m_authGroups . contains ( roleNameFixed ) ) { descriptor . m_authGroups . add ( roleNameFixed ) ; } } } else { if ( data != null ) { throw m_compiler . new VoltCompilerException ( "Only one PARTITION clause is allowed for CREATE PROCEDURE." ) ; } data = new ProcedurePartitionData ( matcher . group ( 2 ) , matcher . group ( 3 ) , matcher . group ( 4 ) , matcher . group ( 5 ) , matcher . group ( 6 ) , matcher . group ( 7 ) ) ; } } return data ; }
Parse and validate the substring containing ALLOW and PARTITION clauses for CREATE PROCEDURE .
32,152
public static void interactWithTheUser ( ) throws Exception { final SQLConsoleReader interactiveReader = new SQLConsoleReader ( new FileInputStream ( FileDescriptor . in ) , System . out ) ; interactiveReader . setBellEnabled ( false ) ; FileHistory historyFile = null ; try { historyFile = new FileHistory ( new File ( System . getProperty ( "user.home" ) , ".sqlcmd_history" ) ) ; interactiveReader . setHistory ( historyFile ) ; KeyMap keyMap = interactiveReader . getKeys ( ) ; keyMap . bind ( new Character ( KeyMap . CTRL_D ) . toString ( ) , new ActionListener ( ) { public void actionPerformed ( ActionEvent e ) { CursorBuffer cursorBuffer = interactiveReader . getCursorBuffer ( ) ; if ( cursorBuffer . length ( ) == 0 ) { throw new SQLCmdEarlyExitException ( ) ; } else { try { interactiveReader . delete ( ) ; } catch ( IOException e1 ) { } } } } ) ; getInteractiveQueries ( interactiveReader ) ; } finally { if ( historyFile != null ) { try { historyFile . flush ( ) ; } catch ( IOException e ) { System . err . printf ( "* Unable to write history to \"%s\" *\n" , historyFile . getFile ( ) . getPath ( ) ) ; if ( m_debug ) { e . printStackTrace ( ) ; } } } if ( interactiveReader != null ) { interactiveReader . shutdown ( ) ; } } }
The main loop for interactive mode .
32,153
static void executeScriptFiles ( List < FileInfo > filesInfo , SQLCommandLineReader parentLineReader , DDLParserCallback callback ) throws IOException { LineReaderAdapter adapter = null ; SQLCommandLineReader reader = null ; StringBuilder statements = new StringBuilder ( ) ; if ( ! m_interactive && callback == null ) { System . out . println ( ) ; StringBuilder commandString = new StringBuilder ( ) ; commandString . append ( filesInfo . get ( 0 ) . toString ( ) ) ; for ( int ii = 1 ; ii < filesInfo . size ( ) ; ii ++ ) { commandString . append ( " " + filesInfo . get ( ii ) . getFile ( ) . toString ( ) ) ; } System . out . println ( commandString . toString ( ) ) ; } for ( int ii = 0 ; ii < filesInfo . size ( ) ; ii ++ ) { FileInfo fileInfo = filesInfo . get ( ii ) ; adapter = null ; reader = null ; if ( fileInfo . getOption ( ) == FileOption . INLINEBATCH ) { reader = parentLineReader ; } else { try { reader = adapter = new LineReaderAdapter ( new FileReader ( fileInfo . getFile ( ) ) ) ; } catch ( FileNotFoundException e ) { System . err . println ( "Script file '" + fileInfo . getFile ( ) + "' could not be found." ) ; stopOrContinue ( e ) ; return ; } if ( fileInfo . getOption ( ) == FileOption . BATCH ) { String line ; while ( ( line = reader . readBatchLine ( ) ) != null ) { statements . append ( line ) . append ( "\n" ) ; } reader = null ; if ( ii == filesInfo . size ( ) - 1 ) { String allStatements = statements . toString ( ) ; byte [ ] bytes = allStatements . getBytes ( "UTF-8" ) ; ByteArrayInputStream bais = new ByteArrayInputStream ( bytes ) ; reader = adapter = new LineReaderAdapter ( new InputStreamReader ( bais ) ) ; } } } try { executeScriptFromReader ( fileInfo , reader , callback ) ; } catch ( SQLCmdEarlyExitException e ) { throw e ; } catch ( Exception x ) { stopOrContinue ( x ) ; } finally { if ( adapter != null ) { adapter . close ( ) ; } } } }
Reads a script file and executes its content . Note that the script file could be an inline batch i . e . a here document that is coming from the same input stream as the file directive .
32,154
private static void printUsage ( String msg ) { System . out . print ( msg ) ; System . out . println ( "\n" ) ; m_exitCode = - 1 ; printUsage ( ) ; }
General application support
32,155
static void printHelp ( OutputStream prtStr ) { try { InputStream is = SQLCommand . class . getResourceAsStream ( m_readme ) ; while ( is . available ( ) > 0 ) { byte [ ] bytes = new byte [ is . available ( ) ] ; is . read ( bytes , 0 , bytes . length ) ; prtStr . write ( bytes ) ; } } catch ( Exception x ) { System . err . println ( x . getMessage ( ) ) ; m_exitCode = - 1 ; return ; } }
Default visibility is for test purposes .
32,156
public static void main ( String args [ ] ) { System . setProperty ( "voltdb_no_logging" , "true" ) ; int exitCode = mainWithReturnCode ( args ) ; System . exit ( exitCode ) ; }
Application entry point
32,157
private synchronized void checkTimeout ( final long timeoutMs ) { final Entry < Integer , SendWork > oldest = m_outstandingWork . firstEntry ( ) ; if ( oldest != null ) { final long now = System . currentTimeMillis ( ) ; SendWork work = oldest . getValue ( ) ; if ( ( now - work . m_ts ) > timeoutMs ) { StreamSnapshotTimeoutException exception = new StreamSnapshotTimeoutException ( String . format ( "A snapshot write task failed after a timeout (currently %d seconds outstanding). " + "Node rejoin may need to be retried" , ( now - work . m_ts ) / 1000 ) ) ; rejoinLog . error ( exception . getMessage ( ) ) ; m_writeFailed . compareAndSet ( null , exception ) ; } } }
Called by the watchdog from the periodic work thread to check if the oldest unacked block is older than the timeout interval .
32,158
synchronized void clearOutstanding ( ) { if ( m_outstandingWork . isEmpty ( ) && ( m_outstandingWorkCount . get ( ) == 0 ) ) { return ; } rejoinLog . trace ( "Clearing outstanding work." ) ; for ( Entry < Integer , SendWork > e : m_outstandingWork . entrySet ( ) ) { e . getValue ( ) . discard ( ) ; } m_outstandingWork . clear ( ) ; m_outstandingWorkCount . set ( 0 ) ; }
Idempotent synchronized method to perform all cleanup of outstanding work so buffers aren t leaked .
32,159
public synchronized void receiveAck ( int blockIndex ) { SendWork work = m_outstandingWork . get ( blockIndex ) ; if ( work == null || work . m_ackCounter == null ) { rejoinLog . warn ( "Received invalid blockIndex ack for targetId " + m_targetId + " for index " + String . valueOf ( blockIndex ) + ( ( work == null ) ? " already removed the block." : " ack counter haven't been initialized." ) ) ; return ; } if ( work . receiveAck ( ) ) { rejoinLog . trace ( "Received ack for targetId " + m_targetId + " removes block for index " + String . valueOf ( blockIndex ) ) ; m_outstandingWorkCount . decrementAndGet ( ) ; m_outstandingWork . remove ( blockIndex ) ; work . discard ( ) ; } else { rejoinLog . trace ( "Received ack for targetId " + m_targetId + " decrements counter for block index " + String . valueOf ( blockIndex ) ) ; } }
Synchronized method to handle the arrival of an Ack .
32,160
synchronized ListenableFuture < Boolean > send ( StreamSnapshotMessageType type , int blockIndex , BBContainer chunk , boolean replicatedTable ) { SettableFuture < Boolean > sendFuture = SettableFuture . create ( ) ; rejoinLog . trace ( "Sending block " + blockIndex + " of type " + ( replicatedTable ? "REPLICATED " : "PARTITIONED " ) + type . name ( ) + " from targetId " + m_targetId + " to " + CoreUtils . hsIdToString ( m_destHSId ) + ( replicatedTable ? ", " + CoreUtils . hsIdCollectionToString ( m_otherDestHostHSIds ) : "" ) ) ; SendWork sendWork = new SendWork ( type , m_targetId , m_destHSId , replicatedTable ? m_otherDestHostHSIds : null , chunk , sendFuture ) ; m_outstandingWork . put ( blockIndex , sendWork ) ; m_outstandingWorkCount . incrementAndGet ( ) ; m_sender . offer ( sendWork ) ; return sendFuture ; }
Send data to the rejoining node tracking what was sent for ack tracking . Synchronized to protect access to m_outstandingWork and to keep m_outstandingWorkCount in sync with m_outstandingWork .
32,161
public static String toSchemaWithoutInlineBatches ( String schema ) { StringBuilder sb = new StringBuilder ( schema ) ; int i = sb . indexOf ( batchSpecificComments ) ; if ( i != - 1 ) { sb . delete ( i , i + batchSpecificComments . length ( ) ) ; } i = sb . indexOf ( startBatch ) ; if ( i != - 1 ) { sb . delete ( i , i + startBatch . length ( ) ) ; } i = sb . indexOf ( endBatch ) ; if ( i != - 1 ) { sb . delete ( i , i + endBatch . length ( ) ) ; } return sb . toString ( ) ; }
Given a schema strips out inline batch statements and associated comments .
32,162
final void shutdown ( ) throws InterruptedException { m_timeoutReaperHandle . cancel ( false ) ; m_ex . shutdown ( ) ; if ( CoreUtils . isJunitTest ( ) ) { m_ex . awaitTermination ( 1 , TimeUnit . SECONDS ) ; } else { m_ex . awaitTermination ( 365 , TimeUnit . DAYS ) ; } m_network . shutdown ( ) ; if ( m_cipherService != null ) { m_cipherService . shutdown ( ) ; m_cipherService = null ; } }
Shutdown the VoltNetwork allowing the Ports to close and free resources like memory pools
32,163
public long getPartitionForParameter ( byte typeValue , Object value ) { if ( m_hashinator == null ) { return - 1 ; } return m_hashinator . getHashedPartitionForParameter ( typeValue , value ) ; }
This is used by clients such as CSVLoader which puts processing into buckets .
32,164
private void refreshPartitionKeys ( boolean topologyUpdate ) { long interval = System . currentTimeMillis ( ) - m_lastPartitionKeyFetched . get ( ) ; if ( ! m_useClientAffinity && interval < PARTITION_KEYS_INFO_REFRESH_FREQUENCY ) { return ; } try { ProcedureInvocation invocation = new ProcedureInvocation ( m_sysHandle . getAndDecrement ( ) , "@GetPartitionKeys" , "INTEGER" ) ; CountDownLatch latch = null ; if ( ! topologyUpdate ) { latch = new CountDownLatch ( 1 ) ; } PartitionUpdateCallback cb = new PartitionUpdateCallback ( latch ) ; if ( ! queue ( invocation , cb , true , System . nanoTime ( ) , USE_DEFAULT_CLIENT_TIMEOUT ) ) { m_partitionUpdateStatus . set ( new ClientResponseImpl ( ClientResponseImpl . SERVER_UNAVAILABLE , new VoltTable [ 0 ] , "Fails to queue the partition update query, please try later." ) ) ; } if ( ! topologyUpdate ) { latch . await ( ) ; } m_lastPartitionKeyFetched . set ( System . currentTimeMillis ( ) ) ; } catch ( InterruptedException | IOException e ) { m_partitionUpdateStatus . set ( new ClientResponseImpl ( ClientResponseImpl . SERVER_UNAVAILABLE , new VoltTable [ 0 ] , "Fails to fetch partition keys from server:" + e . getMessage ( ) ) ) ; } }
Refreshes the cached partition keys from the server .
32,165
public void addSortExpressions ( List < AbstractExpression > sortExprs , List < SortDirectionType > sortDirs ) { assert ( sortExprs . size ( ) == sortDirs . size ( ) ) ; for ( int i = 0 ; i < sortExprs . size ( ) ; ++ i ) { addSortExpression ( sortExprs . get ( i ) , sortDirs . get ( i ) ) ; } }
Add multiple sort expressions to the order - by
32,166
public void addSortExpression ( AbstractExpression sortExpr , SortDirectionType sortDir ) { assert ( sortExpr != null ) ; m_sortExpressions . add ( sortExpr . clone ( ) ) ; m_sortDirections . add ( sortDir ) ; }
Add a sort expression to the order - by
32,167
static java . util . logging . Level getPriorityForLevel ( Level level ) { switch ( level ) { case DEBUG : return java . util . logging . Level . FINEST ; case ERROR : return java . util . logging . Level . SEVERE ; case FATAL : return java . util . logging . Level . SEVERE ; case INFO : return java . util . logging . Level . INFO ; case TRACE : return java . util . logging . Level . FINER ; case WARN : return java . util . logging . Level . WARNING ; default : return null ; } }
Convert the VoltLogger Level to the java . util . logging Level
32,168
void checkAddColumn ( ColumnSchema col ) { if ( table . isText ( ) && ! table . isEmpty ( session ) ) { throw Error . error ( ErrorCode . X_S0521 ) ; } if ( table . findColumn ( col . getName ( ) . name ) != - 1 ) { throw Error . error ( ErrorCode . X_42504 ) ; } if ( col . isPrimaryKey ( ) && table . hasPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42530 ) ; } if ( col . isIdentity ( ) && table . hasIdentityColumn ( ) ) { throw Error . error ( ErrorCode . X_42525 ) ; } if ( ! table . isEmpty ( session ) && ! col . hasDefault ( ) && ( ! col . isNullable ( ) || col . isPrimaryKey ( ) ) && ! col . isIdentity ( ) ) { throw Error . error ( ErrorCode . X_42531 ) ; } }
Checks if the attributes of the Column argument c are compatible with the operation of adding such a Column to the Table argument table .
32,169
void makeNewTable ( OrderedHashSet dropConstraintSet , OrderedHashSet dropIndexSet ) { Table tn = table . moveDefinition ( session , table . tableType , null , null , null , - 1 , 0 , dropConstraintSet , dropIndexSet ) ; if ( tn . indexList . length == table . indexList . length ) { database . persistentStoreCollection . releaseStore ( tn ) ; return ; } tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; }
Drops constraints and their indexes in table . Uses set of names .
32,170
Index addIndex ( int [ ] col , HsqlName name , boolean unique , boolean migrating ) { Index newindex ; if ( table . isEmpty ( session ) || table . isIndexingMutable ( ) ) { PersistentStore store = session . sessionData . getRowStore ( table ) ; newindex = table . createIndex ( store , name , col , null , null , unique , migrating , false , false ) ; } else { newindex = table . createIndexStructure ( name , col , null , null , unique , migrating , false , false ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , newindex , - 1 , 0 , emptySet , emptySet ) ; tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; setNewTableInSchema ( table ) ; updateConstraints ( table , emptySet ) ; } database . schemaManager . addSchemaObject ( newindex ) ; database . schemaManager . recompileDependentObjects ( table ) ; return newindex ; }
Because of the way indexes and column data are held in memory and on disk it is necessary to recreate the table when an index is added to a non - empty cached table .
32,171
void dropIndex ( String indexName ) { Index index ; index = table . getIndex ( indexName ) ; if ( table . isIndexingMutable ( ) ) { table . dropIndex ( session , indexName ) ; } else { OrderedHashSet indexSet = new OrderedHashSet ( ) ; indexSet . add ( table . getIndex ( indexName ) . getName ( ) ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , null , - 1 , 0 , emptySet , indexSet ) ; tn . moveData ( session , table , - 1 , 0 ) ; updateConstraints ( tn , emptySet ) ; setNewTableInSchema ( tn ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; } if ( ! index . isConstraint ( ) ) { database . schemaManager . removeSchemaObject ( index . getName ( ) ) ; } database . schemaManager . recompileDependentObjects ( table ) ; }
Because of the way indexes and column data are held in memory and on disk it is necessary to recreate the table when an index is added to or removed from a non - empty table .
32,172
/**
 * Change a column's type (and/or identity settings). When only metadata
 * changes are needed (same type code, compatible nullability/scale/identity,
 * and precision only widening for VARCHAR/VARBINARY) the column is updated
 * in place; otherwise referencing constraints are checked and the data is
 * converted via the retypeColumn(newCol, colIndex) overload.
 *
 * @param oldCol the existing column definition
 * @param newCol the desired column definition
 */
void retypeColumn(ColumnSchema oldCol, ColumnSchema newCol) {
    boolean allowed = true;
    int oldType = oldCol.getDataType().typeCode;
    int newType = newCol.getDataType().typeCode;
    // With existing rows, the new type must be convertible from the old,
    // and LOB/object types can never be converted.
    if (!table.isEmpty(session) && oldType != newType) {
        allowed = newCol.getDataType().canConvertFrom(oldCol.getDataType());
        switch (oldType) {
            case Types.SQL_BLOB :
            case Types.SQL_CLOB :
            case Types.OTHER :
            case Types.JAVA_OBJECT :
                allowed = false;
                break;
        }
    }
    if (!allowed) {
        throw Error.error(ErrorCode.X_42561);
    }
    int colIndex = table.getColumnIndex(oldCol.getName().name);
    // At most one identity column per table.
    if (newCol.isIdentity() && table.hasIdentityColumn()
            && table.identityColumn != colIndex) {
        throw Error.error(ErrorCode.X_42525);
    }
    if (table.getPrimaryKey().length > 1) {
        // Multi-column PK: carry the old column's PK membership forward.
        newCol.setPrimaryKey(oldCol.isPrimaryKey());
        if (ArrayUtil.find(table.getPrimaryKey(), colIndex) != -1) {
            // Intentionally empty in the original source.
        }
    } else if (table.hasPrimaryKey()) {
        if (oldCol.isPrimaryKey()) {
            newCol.setPrimaryKey(true);
        } else if (newCol.isPrimaryKey()) {
            throw Error.error(ErrorCode.X_42532);
        }
    } else if (newCol.isPrimaryKey()) {
        throw Error.error(ErrorCode.X_42530);
    }
    // "meta" is true when the change can be applied without touching row data.
    boolean meta = newType == oldType;
    meta &= oldCol.isNullable() == newCol.isNullable();
    meta &= oldCol.getDataType().scale == newCol.getDataType().scale;
    meta &= (oldCol.isIdentity() == newCol.isIdentity());
    meta &= (oldCol.getDataType().precision == newCol.getDataType().precision
             || (oldCol.getDataType().precision < newCol.getDataType().precision
                 && (oldType == Types.SQL_VARCHAR
                     || oldType == Types.SQL_VARBINARY)));
    if (meta) {
        // Metadata-only update: no row conversion required.
        oldCol.setType(newCol);
        oldCol.setDefaultExpression(newCol.getDefaultExpression());
        if (newCol.isIdentity()) {
            oldCol.setIdentity(newCol.getIdentitySequence());
        }
        table.setColumnTypeVars(colIndex);
        table.resetDefaultsFlag();
        return;
    }
    // Full retype: verify nothing references the column incompatibly first.
    database.schemaManager.checkColumnIsReferenced(table.getName(),
            table.getColumn(colIndex).getName());
    table.checkColumnInCheckConstraint(colIndex);
    table.checkColumnInFKConstraint(colIndex);
    checkConvertColDataType(oldCol, newCol);
    retypeColumn(newCol, colIndex);
}
Allows changing the type or addition of an IDENTITY sequence .
32,173
void setColNullability ( ColumnSchema column , boolean nullable ) { Constraint c = null ; int colIndex = table . getColumnIndex ( column . getName ( ) . name ) ; if ( column . isNullable ( ) == nullable ) { return ; } if ( nullable ) { if ( column . isPrimaryKey ( ) ) { throw Error . error ( ErrorCode . X_42526 ) ; } table . checkColumnInFKConstraint ( colIndex , Constraint . SET_NULL ) ; removeColumnNotNullConstraints ( colIndex ) ; } else { HsqlName constName = database . nameManager . newAutoName ( "CT" , table . getSchemaName ( ) , table . getName ( ) , SchemaObject . CONSTRAINT ) ; c = new Constraint ( constName , true , null , Constraint . CHECK ) ; c . check = new ExpressionLogical ( column ) ; c . prepareCheckConstraint ( session , table , true ) ; column . setNullable ( false ) ; table . addConstraint ( c ) ; table . setColumnTypeVars ( colIndex ) ; database . schemaManager . addSchemaObject ( c ) ; } }
performs the work for changing the nullability of a column
32,174
void setColDefaultExpression ( int colIndex , Expression def ) { if ( def == null ) { table . checkColumnInFKConstraint ( colIndex , Constraint . SET_DEFAULT ) ; } table . setDefaultExpression ( colIndex , def ) ; }
performs the work for changing the default value of a column
32,175
public boolean setTableType ( Session session , int newType ) { int currentType = table . getTableType ( ) ; if ( currentType == newType ) { return false ; } switch ( newType ) { case TableBase . CACHED_TABLE : break ; case TableBase . MEMORY_TABLE : break ; default : return false ; } Table tn ; try { tn = table . moveDefinition ( session , newType , null , null , null , - 1 , 0 , emptySet , emptySet ) ; tn . moveData ( session , table , - 1 , 0 ) ; updateConstraints ( tn , emptySet ) ; } catch ( HsqlException e ) { return false ; } setNewTableInSchema ( tn ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; database . schemaManager . recompileDependentObjects ( table ) ; return true ; }
Changes the type of a table
32,176
Index addExprIndex ( int [ ] col , Expression [ ] indexExprs , HsqlName name , boolean unique , boolean migrating , Expression predicate ) { Index newindex ; if ( table . isEmpty ( session ) || table . isIndexingMutable ( ) ) { newindex = table . createAndAddExprIndexStructure ( name , col , indexExprs , unique , migrating , false ) ; } else { newindex = table . createIndexStructure ( name , col , null , null , unique , migrating , false , false ) . withExpressions ( indexExprs ) ; Table tn = table . moveDefinition ( session , table . tableType , null , null , newindex , - 1 , 0 , emptySet , emptySet ) ; tn . moveData ( session , table , - 1 , 0 ) ; database . persistentStoreCollection . releaseStore ( table ) ; table = tn ; setNewTableInSchema ( table ) ; updateConstraints ( table , emptySet ) ; } database . schemaManager . addSchemaObject ( newindex ) ; database . schemaManager . recompileDependentObjects ( table ) ; if ( predicate != null ) { newindex = newindex . withPredicate ( predicate ) ; } return newindex ; }
A VoltDB extended variant of addIndex that supports indexed generalized non - column expressions .
32,177
Index addIndex ( int [ ] col , HsqlName name , boolean unique , boolean migrating , Expression predicate ) { return addIndex ( col , name , unique , migrating ) . withPredicate ( predicate ) ; }
A VoltDB extended variant of addIndex that supports partial index predicate .
32,178
/**
 * Construct a ParsedColInfo from a Volt XML "orderby" element, applying the
 * default expression adjustment (finalizing value types).
 */
static public ParsedColInfo fromOrderByXml(AbstractParsedStmt parsedStmt, VoltXMLElement orderByXml) {
    // Default adjuster: finalize value types on the parsed expression tree.
    ExpressionAdjuster finalizer = new ExpressionAdjuster() {
        public AbstractExpression adjust(AbstractExpression expr) {
            ExpressionUtil.finalizeValueTypes(expr);
            return expr;
        }
    };
    return fromOrderByXml(parsedStmt, orderByXml, finalizer);
}
Construct a ParsedColInfo from Volt XML .
32,179
/**
 * Construct a ParsedColInfo from a Volt XML "orderby" element. The caller
 * supplies an adjuster that finalizes the parsed expression.
 *
 * @throws RuntimeException if a non-column ORDER BY child has an
 *         unrecognized node type
 */
static public ParsedColInfo fromOrderByXml(AbstractParsedStmt parsedStmt, VoltXMLElement orderByXml, ExpressionAdjuster adjuster) {
    assert (orderByXml.name.equalsIgnoreCase("orderby"));
    // A "desc" attribute of "true" flips the sort direction.
    String descAttr = orderByXml.attributes.get("desc");
    boolean isDescending = (descAttr != null) && (descAttr.equalsIgnoreCase("true"));
    VoltXMLElement exprXml = orderByXml.children.get(0);
    assert (exprXml != null);
    ParsedColInfo orderCol = new ParsedColInfo();
    orderCol.m_orderBy = true;
    orderCol.m_ascending = !isDescending;
    AbstractExpression parsedExpr = parsedStmt.parseExpressionTree(exprXml);
    assert (parsedExpr != null);
    orderCol.m_expression = adjuster.adjust(parsedExpr);
    if (parsedExpr instanceof TupleValueExpression) {
        // Plain column reference: take names directly from the TVE.
        TupleValueExpression tve = (TupleValueExpression) parsedExpr;
        orderCol.m_columnName = tve.getColumnName();
        orderCol.m_tableName = tve.getTableName();
        orderCol.m_tableAlias = tve.getTableAlias();
        if (orderCol.m_tableAlias == null) {
            orderCol.m_tableAlias = orderCol.m_tableName;
        }
        orderCol.m_alias = tve.getColumnAlias();
    } else {
        // Computed expression: name it against the temporary output table.
        orderCol.m_alias = exprXml.attributes.get("alias");
        orderCol.m_tableName = AbstractParsedStmt.TEMP_TABLE_NAME;
        orderCol.m_tableAlias = AbstractParsedStmt.TEMP_TABLE_NAME;
        orderCol.m_columnName = "";
        // Sanity-check the node type against the kinds we know how to handle.
        switch (exprXml.name) {
            case "operation":
            case "aggregation":
            case "win_aggregation":
            case "function":
            case "rank":
            case "value":
            case "columnref":
                break;
            default:
                throw new RuntimeException("ORDER BY parsed with strange child node type: " + exprXml.name);
        }
    }
    return orderCol;
}
Construct a ParsedColInfo from Volt XML . Allow caller to specify actions to finalize the parsed expression .
32,180
/**
 * Return this parsed column as a SchemaColumn, falling back to the raw
 * column name when no alias was assigned.
 */
public SchemaColumn asSchemaColumn() {
    String effectiveAlias = m_alias;
    if (effectiveAlias == null) {
        effectiveAlias = m_columnName;
    }
    return new SchemaColumn(m_tableName, m_tableAlias, m_columnName, effectiveAlias, m_expression, m_differentiator);
}
Return this as an instance of SchemaColumn
32,181
/**
 * Crash handler invoked on behalf of the EE: log the failure location and
 * any native stack traces, then crash the local VoltDB process.
 *
 * @param reason   failure description from the EE (null -> generic message)
 * @param traces   native stack trace lines, may be null
 * @param filename EE source file of the failure (null -> "unknown")
 * @param lineno   EE source line of the failure
 */
public static void crashVoltDB(String reason, String traces[], String filename, int lineno) {
    VoltLogger hostLog = new VoltLogger("HOST");
    String where = (filename != null) ? filename : "unknown";
    String why = (reason != null) ? reason : "Fatal EE error.";
    String message = why + " In " + where + ":" + lineno;
    hostLog.fatal(message);
    if (traces != null) {
        for (int i = 0; i < traces.length; i++) {
            hostLog.fatal(traces[i]);
        }
    }
    VoltDB.crashLocalVoltDB(message, true, null);
}
Call VoltDB . crashVoltDB on behalf of the EE
32,182
/**
 * Called from the ExecutionEngine to request the next serialized dependency
 * table for the given dependency id.
 *
 * @return the serialized table bytes, or null when no dependency remains
 */
public byte[] nextDependencyAsBytes(final int dependencyId) {
    final VoltTable dependency = m_dependencyTracker.nextDependency(dependencyId);
    if (dependency == null) {
        return null;
    }
    // Copy out the serialized bytes without disturbing the buffer position.
    final ByteBuffer tableBuf = PrivateVoltTableFactory.getTableDataReference(dependency);
    final int startPos = tableBuf.position();
    final byte[] serialized = new byte[tableBuf.limit() - startPos];
    tableBuf.get(serialized);
    tableBuf.position(startPos);
    return serialized;
}
Called from the ExecutionEngine to request serialized dependencies .
32,183
/**
 * Pass the serialized catalog to the engine for initial load.
 *
 * @param timestamp         catalog timestamp
 * @param serializedCatalog catalog text to hand to the EE
 */
public void loadCatalog(long timestamp, String serializedCatalog) {
    try {
        setupProcedure(null);
        m_fragmentContext = FragmentContext.CATALOG_LOAD;
        byte[] catalogBytes = getStringBytes(serializedCatalog);
        coreLoadCatalog(timestamp, catalogBytes);
    } finally {
        // Always reset the fragment context, even if the load throws.
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
Pass the catalog to the engine
32,184
/**
 * Pass catalog diff commands to the EE so it can update its catalog in place.
 *
 * @param timestamp      catalog timestamp
 * @param isStreamUpdate whether this update only affects streams
 * @param diffCommands   serialized catalog diff commands
 * @throws EEException if the engine rejects the update
 */
public final void updateCatalog(final long timestamp, final boolean isStreamUpdate, final String diffCommands) throws EEException {
    try {
        setupProcedure(null);
        m_fragmentContext = FragmentContext.CATALOG_UPDATE;
        coreUpdateCatalog(timestamp, isStreamUpdate, diffCommands);
    } finally {
        // Always reset the fragment context, even if the update throws.
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
Pass diffs to apply to the EE's catalog to update it .
32,185
/**
 * Run a batch of plan fragments in the EE.
 *
 * @param undoQuantumToken Long.MAX_VALUE marks a read-only batch; any other
 *                         value marks a read-write batch with undo support
 * @param traceOn          when true, emit begin/end duration trace events
 * @return the deserializer positioned over the fragment results
 * @throws EEException on execution failure in the engine
 */
public FastDeserializer executePlanFragments(int numFragmentIds, long[] planFragmentIds, long[] inputDepIds, Object[] parameterSets, DeterminismHash determinismHash, String[] sqlTexts, boolean[] isWriteFrags, int[] sqlCRCs, long txnId, long spHandle, long lastCommittedSpHandle, long uniqueId, long undoQuantumToken, boolean traceOn) throws EEException {
    try {
        // A batch with no undo quantum is read-only by construction.
        m_fragmentContext = (undoQuantumToken == Long.MAX_VALUE) ? FragmentContext.RO_BATCH : FragmentContext.RW_BATCH;
        // Stash the SQL text so error paths can report which statement failed.
        m_sqlTexts = sqlTexts;
        if (traceOn) {
            final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPSITE);
            if (traceLog != null) {
                traceLog.add(() -> VoltTrace.beginDuration("execplanfragment", "txnId", TxnEgo.txnIdToString(txnId), "partition", Integer.toString(m_partitionId)));
            }
        }
        FastDeserializer results = coreExecutePlanFragments(m_currentBatchIndex, numFragmentIds, planFragmentIds, inputDepIds, parameterSets, determinismHash, isWriteFrags, sqlCRCs, txnId, spHandle, lastCommittedSpHandle, uniqueId, undoQuantumToken, traceOn);
        if (traceOn) {
            final VoltTrace.TraceEventBatch traceLog = VoltTrace.log(VoltTrace.Category.SPSITE);
            if (traceLog != null) {
                traceLog.add(VoltTrace::endDuration);
            }
        }
        // Record planner cache hit/miss statistics for this batch.
        m_plannerStats.updateEECacheStats(m_eeCacheSize, numFragmentIds - m_cacheMisses, m_cacheMisses, m_partitionId);
        return results;
    } finally {
        // Reset per-batch state whether or not execution succeeded.
        m_cacheMisses = 0;
        m_sqlTexts = null;
        m_fragmentContext = FragmentContext.UNKNOWN;
    }
}
Run multiple plan fragments
32,186
/**
 * Schedule (or reschedule) periodic flushes of the loader buffer.
 *
 * @param delay   initial delay before the first flush, in seconds
 * @param seconds flush period in seconds; a non-positive value disables
 *                periodic flushing
 */
public synchronized void setFlushInterval(long delay, long seconds) {
    // Cancel any previously scheduled flush task first.
    if (m_flush != null) {
        m_flush.cancel(false);
        m_flush = null;
    }
    if (seconds <= 0) {
        return;
    }
    m_flush = m_ses.scheduleAtFixedRate(() -> {
        try {
            flush();
        } catch (Exception e) {
            loaderLog.error("Failed to flush loader buffer, some tuples may not be inserted.", e);
        }
    }, delay, seconds, TimeUnit.SECONDS);
}
Set periodic flush interval and initial delay in seconds .
32,187
/**
 * Closes this Session: rolls back any open transaction, logs the
 * disconnect, releases all session resources, deregisters the session,
 * and nulls out references. The teardown order is significant.
 */
public synchronized void close() {
    if (isClosed) {
        return;
    }
    rollback(false);
    try {
        database.logger.writeToLog(this, Tokens.T_DISCONNECT);
    } catch (HsqlException e) {
        // Logging failure must not prevent the session from closing.
    }
    sessionData.closeAllNavigators();
    sessionData.persistentStoreCollection.clearAllTables();
    sessionData.closeResultCache();
    database.compiledStatementManager.removeSession(sessionId);
    database.sessionManager.removeSession(this);
    // May close the database entirely if this was the last session.
    database.closeIfLast();
    database = null;
    user = null;
    rowActionList = null;
    sessionContext.savepoints = null;
    intConnection = null;
    sessionContext = null;
    lastIdentity = null;
    isClosed = true;
}
Closes this Session .
32,188
/**
 * Sets the transaction ISOLATION level for the next transaction only.
 *
 * @param level one of the SessionInterface TX_* isolation constants
 * @throws HsqlException if a transaction is already in progress
 */
public void setIsolation(int level) {
    if (isInMidTransaction()) {
        throw Error.error(ErrorCode.X_25001);
    }
    isolationMode = level;
    // READ UNCOMMITTED is implemented as read-only access.
    if (level == SessionInterface.TX_READ_UNCOMMITTED) {
        isReadOnly = true;
    }
    // Only deviations from the session default are logged.
    if (level != isolationModeDefault) {
        database.logger.writeToLog(this, getTransactionIsolationSQL());
    }
}
sets ISOLATION for the next transaction only
32,189
/**
 * Verifies that DDL (e.g. creating new database objects such as tables)
 * is allowed in this session.
 *
 * @throws HsqlException if the session or database files are read-only
 */
void checkDDLWrite() {
    checkReadWrite();
    // Script and log replay may execute DDL even when the files are read-only.
    if (!isProcessingScript && !isProcessingLog && database.isFilesReadOnly()) {
        throw Error.error(ErrorCode.DATABASE_IS_READONLY);
    }
}
This is used for creating new database objects such as tables .
32,190
/**
 * Adds a delete action for the row to the transaction manager.
 *
 * NOTE(review): the original body contained an empty
 * "if (abortTransaction) { }" branch — a no-op guard (likely a leftover
 * from a commented-out abort check). Reading the field has no side
 * effects, so the dead branch was removed; behavior is unchanged.
 */
void addDeleteAction(Table table, Row row) {
    database.txManager.addDeleteAction(this, table, row);
}
Adds a delete action to the row and the transaction manager .
32,191
/**
 * Setter for the autocommit attribute. Switching modes commits any
 * transaction currently in progress.
 */
public synchronized void setAutoCommit(boolean autocommit) {
    if (isClosed) {
        return;
    }
    if (autocommit == isAutoCommit) {
        return; // no change requested
    }
    // Commit any in-progress work before flipping the mode.
    commit(false);
    isAutoCommit = autocommit;
}
Setter for the autocommit attribute .
32,192
/**
 * Commits any uncommitted transaction this Session may have open.
 *
 * @throws HsqlException with X_40001 if the transaction manager refuses
 *         the commit (the transaction is rolled back first)
 */
public synchronized void commit(boolean chain) {
    if (isClosed) {
        return;
    }
    if (!isTransaction) {
        // No open transaction: just restore the per-transaction defaults.
        isReadOnly = isReadOnlyDefault;
        isolationMode = isolationModeDefault;
        return;
    }
    boolean committed = database.txManager.commitTransaction(this);
    if (!committed) {
        // Serialization failure: roll back and report it to the caller.
        rollback(false);
        throw Error.error(ErrorCode.X_40001);
    }
    endTransaction(true);
}
Commits any uncommitted transaction this Session may have open .
32,193
/**
 * Rolls back any uncommitted transaction this Session may have open.
 */
public synchronized void rollback(boolean chain) {
    if (isClosed) {
        return;
    }
    if (!isTransaction) {
        // No open transaction: just restore the per-transaction defaults.
        isReadOnly = isReadOnlyDefault;
        isolationMode = isolationModeDefault;
        return;
    }
    try {
        database.logger.writeToLog(this, Tokens.T_ROLLBACK);
    } catch (HsqlException e) {
        // Logging failures must not prevent the rollback itself.
    }
    database.txManager.rollback(this);
    endTransaction(false);
}
Rolls back any uncommitted transaction this Session may have open .
32,194
/**
 * Registers a transaction SAVEPOINT. A new SAVEPOINT with the name of an
 * existing one replaces the old one.
 */
public synchronized void savepoint(String name) {
    int existing = sessionContext.savepoints.getIndex(name);
    if (existing != -1) {
        // Replace: drop the old savepoint of the same name first.
        sessionContext.savepoints.remove(name);
        sessionContext.savepointTimestamps.remove(existing);
    }
    // Record the current row-action position for partial rollback.
    int actionCount = rowActionList.size();
    sessionContext.savepoints.add(name, ValuePool.getInt(actionCount));
    sessionContext.savepointTimestamps.addLast(actionTimestamp);
    try {
        database.logger.writeToLog(this, getSavepointSQL(name));
    } catch (HsqlException e) {
        // Logging failures are ignored; the savepoint is already registered.
    }
}
Registers a transaction SAVEPOINT . A new SAVEPOINT with the name of an existing one replaces the old SAVEPOINT .
32,195
/**
 * Performs a partial transaction ROLLBACK to the named savepoint.
 *
 * @throws HsqlException with X_3B001 if no savepoint with the name exists
 */
public synchronized void rollbackToSavepoint(String name) {
    if (isClosed) {
        return;
    }
    int index = sessionContext.savepoints.getIndex(name);
    if (index < 0) {
        throw Error.error(ErrorCode.X_3B001, name);
    }
    database.txManager.rollbackSavepoint(this, index);
    try {
        database.logger.writeToLog(this, getSavepointRollbackSQL(name));
    } catch (HsqlException e) {
        // Logging failures are ignored; the rollback already happened.
    }
}
Performs a partial transaction ROLLBACK to savepoint .
32,196
/**
 * Performs a partial transaction ROLLBACK of the current savepoint level
 * (the first registered savepoint).
 */
public synchronized void rollbackToSavepoint() {
    if (isClosed) {
        return;
    }
    String name = (String) sessionContext.savepoints.getKey(0);
    database.txManager.rollbackSavepoint(this, 0);
    try {
        database.logger.writeToLog(this, getSavepointRollbackSQL(name));
    } catch (HsqlException e) {
        // Logging failures are ignored; the rollback already happened.
    }
}
Performs a partial transaction ROLLBACK of current savepoint level .
32,197
/**
 * Releases the named savepoint together with every savepoint registered
 * after it.
 *
 * @throws HsqlException with X_3B001 if no savepoint with the name exists
 */
public synchronized void releaseSavepoint(String name) {
    int index = sessionContext.savepoints.getIndex(name);
    if (index < 0) {
        throw Error.error(ErrorCode.X_3B001, name);
    }
    // Drop the named savepoint and every later one, newest first.
    for (int last = sessionContext.savepoints.size() - 1; last >= index; last--) {
        sessionContext.savepoints.remove(last);
        sessionContext.savepointTimestamps.removeLast();
    }
}
Releases a savepoint
32,198
/**
 * Sets READ ONLY for the next transaction only.
 *
 * @throws HsqlException if trying to clear read-only on a read-only
 *         database, or if a transaction is already in progress
 */
public void setReadOnly(boolean readonly) {
    // A read-only database cannot be switched to read-write.
    if (database.databaseReadOnly && !readonly) {
        throw Error.error(ErrorCode.DATABASE_IS_READONLY);
    }
    if (isInMidTransaction()) {
        throw Error.error(ErrorCode.X_25001);
    }
    isReadOnly = readonly;
}
sets READ ONLY for next transaction only
32,199
/**
 * Applies an insert, update, or delete action originating from an
 * updatable result to the underlying base table.
 *
 * @param cmd the row-update request, carrying the result id, action type,
 *            and parameter data
 * @return the execution result, or an X_24501 error result when the
 *         referenced result is no longer cached
 */
private Result executeResultUpdate(Result cmd) {
    long resultId = cmd.getResultId();
    int actionType = cmd.getActionType();
    Result cached = sessionData.getDataResult(resultId);
    if (cached == null) {
        // The referenced result set is no longer available.
        return Result.newErrorResult(Error.error(ErrorCode.X_24501));
    }
    Object[] paramValues = cmd.getParameterData();
    Type[] paramTypes = cmd.metaData.columnTypes;
    StatementQuery query = (StatementQuery) cached.getStatement();
    QueryExpression queryExpr = query.queryExpression;
    Table baseTable = queryExpr.getBaseTable();
    int[] columnMap = queryExpr.getBaseTableColumnMap();
    // Configure the shared row-update statement, then execute it.
    sessionContext.rowUpdateStatement.setRowActionProperties(actionType, baseTable, paramTypes, columnMap);
    return executeCompiledStatement(sessionContext.rowUpdateStatement, paramValues);
}
Retrieves the result of inserting , updating , or deleting a row from an updatable result .