idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
31,900
/**
 * Sets the designated parameter to SQL NULL, mapping the JDBC SQL type to
 * the corresponding VoltDB null sentinel value.
 */
public void setNull(int parameterIndex, int sqlType) throws SQLException {
    checkParameterBounds(parameterIndex);
    final Object nullValue;
    switch (sqlType) {
        case Types.TINYINT:   nullValue = VoltType.NULL_TINYINT;   break;
        case Types.SMALLINT:  nullValue = VoltType.NULL_SMALLINT;  break;
        case Types.INTEGER:   nullValue = VoltType.NULL_INTEGER;   break;
        case Types.BIGINT:    nullValue = VoltType.NULL_BIGINT;    break;
        case Types.DOUBLE:    nullValue = VoltType.NULL_FLOAT;     break;
        case Types.DECIMAL:   nullValue = VoltType.NULL_DECIMAL;   break;
        case Types.TIMESTAMP: nullValue = VoltType.NULL_TIMESTAMP; break;
        // All string-like (and untyped) nulls share one sentinel.
        case Types.VARBINARY:
        case Types.VARCHAR:
        case Types.NVARCHAR:
        case Types.OTHER:
        case Types.NULL:
            nullValue = VoltType.NULL_STRING_OR_VARBINARY;
            break;
        default:
            throw SQLError.get(SQLError.ILLEGAL_ARGUMENT);
    }
    this.parameters[parameterIndex - 1] = nullValue;
}
Sets the designated parameter to SQL NULL .
31,901
/** Binds the given object directly as the value of the designated parameter. */
public void setObject(int parameterIndex, Object x) throws SQLException {
    checkParameterBounds(parameterIndex);
    final int slot = parameterIndex - 1; // JDBC indexes are 1-based
    this.parameters[slot] = x;
}
Sets the value of the designated parameter using the given object .
31,902
/** Sets the designated parameter to the given Java short value. */
public void setShort(int parameterIndex, short x) throws SQLException {
    checkParameterBounds(parameterIndex);
    final int slot = parameterIndex - 1; // JDBC indexes are 1-based
    this.parameters[slot] = x;
}
Sets the designated parameter to the given Java short value .
31,903
// Sets the designated parameter to the given java.sql.Timestamp using the
// supplied Calendar. Not supported by this driver: always throws after the
// parameter index has been range-checked.
public void setTimestamp ( int parameterIndex , Timestamp x , Calendar cal ) throws SQLException { checkParameterBounds ( parameterIndex ) ; throw SQLError . noSupport ( ) ; }
Sets the designated parameter to the given java . sql . Timestamp value using the given Calendar object .
31,904
/**
 * Sets the designated parameter to the given java.net.URL value, stored as
 * its string form; a null URL becomes the Volt NULL string/varbinary sentinel.
 */
public void setURL(int parameterIndex, URL x) throws SQLException {
    checkParameterBounds(parameterIndex);
    if (x == null) {
        this.parameters[parameterIndex - 1] = VoltType.NULL_STRING_OR_VARBINARY;
    } else {
        this.parameters[parameterIndex - 1] = x.toString();
    }
}
Sets the designated parameter to the given java . net . URL value .
31,905
/**
 * Creates an importer instance from the given configuration and wires it to
 * the server's import adapter so it is ready for use inside the server.
 */
public final AbstractImporter createImporter(ImporterConfig config) {
    final AbstractImporter instance = create(config);
    instance.setImportServerAdapter(m_importServerAdapter);
    return instance;
}
Method that is used by the importer framework classes to create an importer instance and wire it correctly for use within the server .
31,906
/**
 * Expands a BigInteger's scaled two's-complement magnitude bytes to a fixed
 * 16-byte array, sign-extending negative values with 0xFF filler so the most
 * significant bit of the most significant byte carries the sign.
 *
 * @param scaledValue big-endian two's-complement bytes (length <= 16)
 * @param isNegative  true when the value is negative and needs 0xFF padding
 * @return the input array itself if already 16 bytes, else a new 16-byte array
 */
private static final byte[] expandToLength16(byte scaledValue[], final boolean isNegative) {
    if (scaledValue.length == 16) {
        return scaledValue;
    }
    byte replacement[] = new byte[16];
    if (isNegative) {
        // Sign-extend: 0xFF padding preserves the two's-complement value.
        Arrays.fill(replacement, (byte) -1);
    }
    // Right-align the magnitude bytes; arraycopy replaces the manual loop.
    System.arraycopy(scaledValue, 0, replacement, 16 - scaledValue.length, scaledValue.length);
    return replacement;
}
Converts BigInteger s byte representation containing a scaled magnitude to a fixed size 16 byte array and set the sign in the most significant byte s most significant bit .
31,907
/**
 * Deserializes a Volt fixed-precision/scale decimal from its String form:
 * normalizes the scale to kDefaultScale (stripping zeros first, rounding only
 * when necessary) and rejects values wider than kDefaultPrecision digits.
 *
 * @param decimal string representation, or null
 * @return the normalized BigDecimal, or null when the input is null
 * @throws IOException declared for serialization-path compatibility
 */
public static BigDecimal deserializeBigDecimalFromString(String decimal) throws IOException {
    if (decimal == null) {
        return null;
    }
    BigDecimal bd = new BigDecimal(decimal);
    if (bd.scale() > kDefaultScale) {
        bd = bd.stripTrailingZeros();
        if (bd.scale() > kDefaultScale) {
            bd = roundToScale(bd, kDefaultScale, getRoundingMode());
        }
    }
    if (bd.scale() < kDefaultScale) {
        bd = bd.setScale(kDefaultScale);
    }
    // Compare against kDefaultPrecision so the check stays consistent with
    // the error message (the original hard-coded 38 here).
    if (bd.precision() > kDefaultPrecision) {
        throw new RuntimeException("Decimal " + bd + " has more than "
                + kDefaultPrecision + " digits of precision.");
    }
    return bd;
}
Deserialize a Volt fixed precision and scale 16 - byte decimal from a String representation
31,908
/**
 * Returns true when the file's last-modified timestamp falls within the
 * configured window of m_config.days before m_currentTimeMillis.
 * An age of zero days counts as the current day, hence the +1.
 */
private static boolean isFileModifiedInCollectionPeriod(File file) {
    final long age = m_currentTimeMillis - file.lastModified();
    if (age < 0) {
        // Modified "in the future" relative to collection time: exclude.
        return false;
    }
    return TimeUnit.MILLISECONDS.toDays(age) + 1 <= m_config.days;
}
value of diff = 0 indicates current day
31,909
/**
 * Replaces a VARBINARY constant child expression with an equivalent BIGINT
 * constant in the parent's child slot.
 *
 * @return true when the conversion happened, false otherwise
 */
public static boolean voltMutateToBigintType(Expression maybeConstantNode, Expression parent, int childIndex) {
    // Only non-null VARBINARY value constants are eligible.
    if (maybeConstantNode.opType != OpTypes.VALUE
            || maybeConstantNode.dataType == null
            || !maybeConstantNode.dataType.isBinaryType()) {
        return false;
    }
    ExpressionValue exprVal = (ExpressionValue) maybeConstantNode;
    if (exprVal.valueData == null) {
        return false;
    }
    BinaryData data = (BinaryData) exprVal.valueData;
    parent.nodes[childIndex] = new ExpressionValue(data.toLong(), Type.SQL_BIGINT);
    return true;
}
Given a ExpressionValue that is a VARBINARY constant convert it to a BIGINT constant . Returns true for a successful conversion and false otherwise .
31,910
/**
 * Appends this constraint's foreign key declaration to the buffer:
 * [CONSTRAINT name] FOREIGN KEY (ref cols) REFERENCES main (main cols)
 * [ON DELETE action] [ON UPDATE action].
 */
private void getFKStatement(StringBuffer a) {
    if (!getName().isReservedName()) {
        a.append(Tokens.T_CONSTRAINT).append(' ');
        a.append(getName().statementName);
        a.append(' ');
    }
    a.append(Tokens.T_FOREIGN).append(' ').append(Tokens.T_KEY);
    int[] cols = getRefColumns();
    getColumnList(getRef(), cols, cols.length, a);
    a.append(' ').append(Tokens.T_REFERENCES).append(' ');
    a.append(getMain().getName().getSchemaQualifiedStatementName());
    cols = getMainColumns();
    getColumnList(getMain(), cols, cols.length, a);
    // Referential actions are emitted only when they differ from NO ACTION.
    if (getDeleteAction() != Constraint.NO_ACTION) {
        a.append(' ').append(Tokens.T_ON).append(' ').append(Tokens.T_DELETE).append(' ');
        a.append(getDeleteActionString());
    }
    if (getUpdateAction() != Constraint.NO_ACTION) {
        a.append(' ').append(Tokens.T_ON).append(' ').append(Tokens.T_UPDATE).append(' ');
        a.append(getUpdateActionString());
    }
}
Generates the foreign key declaration for a given Constraint object .
31,911
/**
 * Appends a parenthesized, comma-separated list of the first len column
 * names of t (selected by the col index array) to the buffer.
 */
private static void getColumnList(Table t, int[] col, int len, StringBuffer a) {
    a.append('(');
    for (int i = 0; i < len; i++) {
        if (i > 0) {
            a.append(',');
        }
        a.append(t.getColumn(col[i]).getName().statementName);
    }
    a.append(')');
}
Generates the column definitions for a table .
31,912
/** Returns the SQL token text for a foreign key referential action code. */
private static String getActionString(int action) {
    if (action == Constraint.RESTRICT) {
        return Tokens.T_RESTRICT;
    }
    if (action == Constraint.CASCADE) {
        return Tokens.T_CASCADE;
    }
    if (action == Constraint.SET_DEFAULT) {
        return Tokens.T_SET + ' ' + Tokens.T_DEFAULT;
    }
    if (action == Constraint.SET_NULL) {
        return Tokens.T_SET + ' ' + Tokens.T_NULL;
    }
    // Anything else (including NO_ACTION itself) renders as NO ACTION.
    return Tokens.T_NO + ' ' + Tokens.T_ACTION;
}
Returns the foreign key action rule .
31,913
/**
 * Returns true when this UNIQUE constraint covers exactly the given column
 * set (order-insensitive). Always false for non-UNIQUE constraints.
 */
boolean isUniqueWithColumns(int[] cols) {
    if (constType != UNIQUE) {
        return false;
    }
    if (core.mainCols.length != cols.length) {
        return false;
    }
    return ArrayUtil.haveEqualSets(core.mainCols, cols, cols.length);
}
Compares this with another constraint column set . This is used only for UNIQUE constraints .
31,914
/**
 * Returns true when this MAIN / FOREIGN KEY constraint links the same pair
 * of tables over the same (order-insensitive) column sets.
 */
boolean isEquivalent(Table mainTable, int[] mainCols, Table refTable, int[] refCols) {
    boolean isFkConstraint = constType == Constraint.MAIN || constType == Constraint.FOREIGN_KEY;
    if (!isFkConstraint) {
        return false;
    }
    if (mainTable != core.mainTable || refTable != core.refTable) {
        return false;
    }
    return ArrayUtil.areEqualSets(core.mainCols, mainCols)
            && ArrayUtil.areEqualSets(core.refCols, refCols);
}
Compares this with another constraint column set . This implementation only checks FOREIGN KEY constraints .
31,915
/**
 * Rewires this constraint after a structural table change: swaps in the new
 * Table object, re-resolves main/ref indexes by name, and shifts column
 * positions by adjust at colIndex. CHECK constraints are recompiled against
 * the new table. Callers must have verified beforehand that this is safe.
 */
void updateTable(Session session, Table oldTable, Table newTable, int colIndex, int adjust) {
    if (oldTable == core.mainTable) {
        core.mainTable = newTable;
        if (core.mainIndex != null) {
            // Re-resolve by name: the new table carries fresh index objects.
            core.mainIndex = core.mainTable.getIndex(core.mainIndex.getName().name);
            core.mainCols = ArrayUtil.toAdjustedColumnArray(core.mainCols, colIndex, adjust);
        }
    }
    if (oldTable == core.refTable) {
        core.refTable = newTable;
        if (core.refIndex != null) {
            core.refIndex = core.refTable.getIndex(core.refIndex.getName().name);
            core.refCols = ArrayUtil.toAdjustedColumnArray(core.refCols, colIndex, adjust);
        }
    }
    if (constType == CHECK) {
        recompile(session, newTable);
    }
}
Used to update constrains to reflect structural changes in a table . Prior checks must ensure that this method does not throw .
31,916
/**
 * Enforces CHECK and FOREIGN KEY constraints when a row is inserted into the
 * referencing (child) table; throws an integrity violation error on failure.
 */
void checkInsert(Session session, Table table, Object[] row) {
    switch (constType) {
        case CHECK:
            if (!isNotNull) {
                checkCheckConstraint(session, table, row);
            }
            return;
        case FOREIGN_KEY:
            PersistentStore store = session.sessionData.getRowStore(core.mainTable);
            if (ArrayUtil.hasNull(row, core.refCols)) {
                // MATCH SIMPLE (or a single-column key) tolerates any null;
                // otherwise only an all-null key is acceptable.
                if (core.matchType == OpTypes.MATCH_SIMPLE) {
                    return;
                }
                if (core.refCols.length == 1) {
                    return;
                }
                if (ArrayUtil.hasAllNull(row, core.refCols)) {
                    return;
                }
            } else if (core.mainIndex.exists(session, store, row, core.refCols)) {
                return;
            } else if (core.mainTable == core.refTable) {
                // Self-referencing table: the inserted row may satisfy itself.
                int compare = core.mainIndex.compareRowNonUnique(row, core.refCols, row);
                if (compare == 0) {
                    return;
                }
            }
            String[] info = new String[]{ core.refName.name, core.mainTable.getName().name };
            throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
}
Checks for foreign key or check constraint violation when inserting a row into the child table .
31,917
/**
 * Verifies that the main table still holds a matching row for the candidate
 * child row. Returns false when the key contains nulls, true when a matching
 * main row exists; otherwise throws an integrity violation error.
 */
boolean checkHasMainRef(Session session, Object[] row) {
    if (ArrayUtil.hasNull(row, core.refCols)) {
        return false;
    }
    PersistentStore store = session.sessionData.getRowStore(core.mainTable);
    boolean exists = core.mainIndex.exists(session, store, row, core.refCols);
    if (!exists) {
        String[] info = new String[]{ core.refName.name, core.mainTable.getName().name };
        throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
    return true;
}
For the candidate table row finds any referring node in the main table . This is used to check referential integrity when updating a node . We have to make sure that the main table still holds a valid main record . returns true If a valid row is found false if there are null in the data Otherwise a INTEGRITY VIOLATION Exception gets thrown .
31,918
/**
 * Pre-flight check used before creating a new FOREIGN KEY constraint: scans
 * every row of the referencing table and verifies each has a matching row in
 * the main table (or an acceptable null key). Throws an integrity violation
 * error on the first offending row.
 */
void checkReferencedRows(Session session, Table table, int[] rowColArray) {
    Index mainIndex = getMainIndex();
    PersistentStore store = session.sessionData.getRowStore(table);
    RowIterator it = table.rowIterator(session);
    while (true) {
        Row row = it.getNextRow();
        if (row == null) {
            break;
        }
        Object[] rowData = row.getData();
        if (ArrayUtil.hasNull(rowData, rowColArray)) {
            if (core.matchType == OpTypes.MATCH_SIMPLE) {
                continue;
            }
        } else if (mainIndex.exists(session, store, rowData, rowColArray)) {
            continue;
        }
        if (ArrayUtil.hasAllNull(rowData, rowColArray)) {
            continue;
        }
        // Render the offending key values for diagnostics.
        // BUG FIX: the column type must be looked up by the table column
        // position (rowColArray[i]), not by the loop counter i.
        String colValues = "";
        for (int i = 0; i < rowColArray.length; i++) {
            Object o = rowData[rowColArray[i]];
            colValues += table.getColumnTypes()[rowColArray[i]].convertToString(o);
            colValues += ",";
        }
        // NOTE(review): colValues is built but never included in the error
        // info; consider adding it to the message. Preserved as-is.
        String[] info = new String[]{ getName().name, getMain().getName().name };
        throw Error.error(ErrorCode.X_23502, ErrorCode.CONSTRAINT, info);
    }
}
Check used before creating a new foreign key constraint : this method checks all rows of a table to ensure they all have a corresponding row in the main table .
31,919
/**
 * Returns the backing-array size for an open-addressing (linear probing)
 * hash table: the smallest power of two that holds setSize elements at the
 * desired load factor.
 */
static int chooseTableSize(int setSize) {
    if (setSize == 1) {
        return 2;
    }
    // Start at the smallest power of two >= setSize, then grow until the
    // load-factor constraint is satisfied.
    int capacity = Integer.highestOneBit(setSize - 1) << 1;
    while (capacity * DESIRED_LOAD_FACTOR < setSize) {
        capacity <<= 1;
    }
    return capacity;
}
Returns an array size suitable for the backing array of a hash table that uses open addressing with linear probing in its implementation . The returned size is the smallest power of two that can hold setSize elements with the desired load factor .
31,920
/**
 * Converts an exported row's field values into their string representations.
 * Field decoders are built once per export generation (from the generation's
 * column types and names) and cached in m_fieldDecoders.
 *
 * @param to reusable output array; reallocated when null or too small
 * @return the (possibly reallocated) array of decoded string values
 */
public String[] decode(long generation, String tableName, List<VoltType> types, List<String> names,
        String[] to, Object[] fields) throws RuntimeException {
    Preconditions.checkArgument(fields != null && fields.length > m_firstFieldOffset,
            "null or inapropriately sized export row array");
    // Single map lookup instead of the original containsKey + get pair.
    StringFieldDecoder[] fieldDecoders = m_fieldDecoders.get(generation);
    if (fieldDecoders == null) {
        int fieldCount = 0;
        Map<String, DecodeType> typeMap = getTypeMap(generation, types, names);
        ImmutableList.Builder<StringFieldDecoder> lb = ImmutableList.builder();
        for (org.voltdb.exportclient.decode.DecodeType dt : typeMap.values()) {
            lb.add(dt.accept(decodingVisitor, fieldCount++, null));
        }
        fieldDecoders = lb.build().toArray(new StringFieldDecoder[0]);
        m_fieldDecoders.put(generation, fieldDecoders);
    }
    if (to == null || to.length < fieldDecoders.length) {
        to = new String[fieldDecoders.length];
    }
    // Skip the metadata prefix (m_firstFieldOffset) in the incoming row.
    for (int i = m_firstFieldOffset, j = 0; i < fields.length && j < fieldDecoders.length; ++i, ++j) {
        fieldDecoders[j].decode(to, fields[i]);
    }
    return to;
}
Converts an object array containing an exported row 's values into an array of their string representations
31,921
/**
 * Looks up and removes the in-flight transaction record for the given client
 * interface handle, releasing its backpressure accounting. Returns null when
 * the partition or the handle is unknown.
 */
Iv2InFlight findHandle(long ciHandle) {
    assert (!shouldCheckThreadIdAssertion() || m_expectedThreadId == Thread.currentThread().getId());
    int partitionId = getPartIdFromHandle(ciHandle);
    PartitionInFlightTracker partitionStuff = m_trackerMap.get(partitionId);
    if (partitionStuff == null) {
        tmLog.error("Unable to find handle list for partition: " + partitionId
                + ", client interface handle: " + ciHandle);
        return null;
    }
    Iv2InFlight inFlight = partitionStuff.m_inFlights.remove(ciHandle);
    if (inFlight == null) {
        return null;
    }
    m_acg.reduceBackpressure(inFlight.m_messageSize);
    m_outstandingTxns--;
    return inFlight;
}
Retrieve the client information for the specified handle
31,922
/**
 * Releases accounting for every outstanding transaction when a connection
 * goes away: returns backpressure and decrements the outstanding count for
 * each tracked in-flight entry across all partitions.
 */
void freeOutstandingTxns() {
    assert (!shouldCheckThreadIdAssertion() || m_expectedThreadId == Thread.currentThread().getId());
    for (PartitionInFlightTracker tracker : m_trackerMap.values()) {
        for (Iv2InFlight inflight : tracker.m_inFlights.values()) {
            m_acg.reduceBackpressure(inflight.m_messageSize);
            m_outstandingTxns--;
        }
    }
}
When a connection goes away free all resources held by that connection . This opens a small window of opportunity for mischief in that work may still be outstanding in the cluster , but once the client goes away so does the mapping to the resources allocated to it .
31,923
/**
 * Compiles a DDL schema from the given reader, statement by statement, then
 * records any matched extra classes with the tracker.
 *
 * Resource fix: the reader is now closed even when statement processing
 * throws (the original leaked it on that path).
 *
 * @throws VoltCompiler.VoltCompilerException on a compile error or when the
 *         reader cannot be closed
 */
void loadSchema(Reader reader, Database db, DdlProceduresToLoad whichProcs)
        throws VoltCompiler.VoltCompilerException {
    int currLineNo = 1;
    try {
        DDLStatement stmt = getNextStatement(reader, m_compiler, currLineNo);
        while (stmt != null) {
            processVoltDBStatements(db, whichProcs, stmt);
            stmt = getNextStatement(reader, m_compiler, stmt.endLineNo);
        }
    } finally {
        // NOTE: a close failure here can mask an in-flight compile exception,
        // matching the original's behavior of surfacing the close error.
        try {
            reader.close();
        } catch (IOException e) {
            throw m_compiler.new VoltCompilerException("Error closing schema file");
        }
    }
    m_tracker.addExtraClasses(m_classMatcher.getMatchedClassList());
    m_classMatcher.clear();
}
Compile a DDL schema from an abstract reader
31,924
/**
 * Generates the DDL that creates the DR conflict tables when XDCR is enabled,
 * or drops them when it is not.
 */
private String generateDDLForDRConflictsTable(Database currentDB, Database previousDBIfAny,
        boolean isCurrentXDCR) {
    final StringBuilder ddl = new StringBuilder();
    if (isCurrentXDCR) {
        createDRConflictTables(ddl, previousDBIfAny);
    } else {
        dropDRConflictTablesIfNeeded(ddl);
    }
    return ddl.toString();
}
Generate DDL to create or drop the DR conflict table
31,925
/**
 * Processes a VoltDB-specific CREATE STREAM DDL statement: parses the table
 * name plus optional EXPORT TO TARGET and PARTITION ON COLUMN clauses, marks
 * the table's schema XML as a stream, and records its export target and
 * partition column.
 */
private void processCreateStreamStatement(DDLStatement stmt, Database db, DdlProceduresToLoad whichProcs)
        throws VoltCompilerException {
    String statement = stmt.statement;
    Matcher statementMatcher = SQLParser.matchCreateStream(statement);
    if (!statementMatcher.matches()) {
        throw m_compiler.new VoltCompilerException(String.format(
                "Invalid CREATE STREAM statement: \"%s\", "
                        + "expected syntax: CREATE STREAM <table> [PARTITION ON COLUMN <column-name>] [EXPORT TO TARGET <target>] (column datatype, ...); ",
                statement.substring(0, statement.length() - 1)));
    }
    String tableName = checkIdentifierStart(statementMatcher.group(1), statement);
    String targetName = null;
    String columnName = null;
    // Scan the optional clause text: at most one EXPORT and one PARTITION clause.
    if ((statementMatcher.groupCount() > 1)
            && (statementMatcher.group(2) != null)
            && (!statementMatcher.group(2).isEmpty())) {
        String clauses = statementMatcher.group(2);
        Matcher matcher = SQLParser.matchAnyCreateStreamStatementClause(clauses);
        int start = 0;
        while (matcher.find(start)) {
            start = matcher.end();
            if (matcher.group(1) != null) {
                if (targetName != null) {
                    throw m_compiler.new VoltCompilerException(
                            "Only one Export clause is allowed for CREATE STREAM.");
                }
                targetName = matcher.group(1);
            } else {
                if (columnName != null) {
                    throw m_compiler.new VoltCompilerException(
                            "Only one PARTITION clause is allowed for CREATE STREAM.");
                }
                columnName = matcher.group(2);
            }
        }
    }
    VoltXMLElement tableXML = m_schema.findChild("table", tableName.toUpperCase());
    if (tableXML == null) {
        throw m_compiler.new VoltCompilerException(
                String.format("Invalid STREAM statement: table %s does not exist", tableName));
    }
    tableXML.attributes.put("stream", "true");
    if (columnName != null) {
        tableXML.attributes.put("partitioncolumn", columnName.toUpperCase());
        m_compiler.markTableAsDirty(tableName);
    }
    // No explicit target falls back to the default export connector.
    targetName = (targetName != null)
            ? checkIdentifierStart(targetName, statement)
            : Constants.DEFAULT_EXPORT_CONNECTOR_NAME;
    if (tableXML.attributes.containsKey("drTable")
            && "ENABLE".equals(tableXML.attributes.get("drTable"))) {
        throw m_compiler.new VoltCompilerException(String.format(
                "Invalid CREATE STREAM statement: table %s is a DR table.", tableName));
    }
    tableXML.attributes.put("export", targetName);
}
Process a VoltDB - specific create stream DDL statement
31,926
/**
 * Re-populates the tracker from the schema XML: for each table element,
 * records (or clears) its partition column, its export target (a migrate
 * target doubles as an export target), and its DR flag.
 */
private void fillTrackerFromXML() {
    for (VoltXMLElement e : m_schema.children) {
        if (!e.name.equals("table")) {
            continue;
        }
        String tableName = e.attributes.get("name");
        String partitionCol = e.attributes.get("partitioncolumn");
        String export = e.attributes.get("export");
        String drTable = e.attributes.get("drTable");
        String migrateTarget = e.attributes.get("migrateExport");
        export = StringUtil.isEmpty(export) ? migrateTarget : export;
        final boolean isStream = (e.attributes.get("stream") != null);
        if (partitionCol != null) {
            m_tracker.addPartition(tableName, partitionCol);
        } else {
            m_tracker.removePartition(tableName);
        }
        if (!StringUtil.isEmpty(export)) {
            m_tracker.addExportedTable(tableName, export, isStream);
        } else {
            m_tracker.removeExportedTable(tableName, isStream);
        }
        if (drTable != null) {
            m_tracker.addDRedTable(tableName, drTable);
        }
    }
}
requested from the compiler
31,927
/**
 * Returns true when the two indexes are identical in everything but name:
 * same type, countability, uniqueness flags, expressions, predicate, and the
 * same column order relative to the base table.
 */
private static boolean indexesAreDups(Index idx1, Index idx2) {
    if (idx1.getType() != idx2.getType()) {
        return false;
    }
    if (idx1.getCountable() != idx2.getCountable()) {
        return false;
    }
    if (idx1.getUnique() != idx2.getUnique()) {
        return false;
    }
    if (idx1.getAssumeunique() != idx2.getAssumeunique()) {
        return false;
    }
    if (idx1.getColumns().size() != idx2.getColumns().size()) {
        return false;
    }
    if (!(idx1.getExpressionsjson().equals(idx2.getExpressionsjson()))) {
        return false;
    }
    // The duplicated order-extraction loops are factored into a helper.
    if (!Arrays.equals(baseTableColumnOrder(idx1), baseTableColumnOrder(idx2))) {
        return false;
    }
    // Partial-index predicates must match (empty string means no predicate).
    if (idx1.getPredicatejson().length() > 0) {
        return idx1.getPredicatejson().equals(idx2.getPredicatejson());
    }
    if (idx2.getPredicatejson().length() > 0) {
        return idx2.getPredicatejson().equals(idx1.getPredicatejson());
    }
    return true;
}

/** Maps an index's column positions to base-table column indexes, in index order. */
private static int[] baseTableColumnOrder(Index idx) {
    int[] order = new int[idx.getColumns().size()];
    for (ColumnRef cref : idx.getColumns()) {
        order[cref.getIndex()] = cref.getColumn().getIndex();
    }
    return order;
}
Return true if the two indexes are identical with a different name .
31,928
/**
 * Adds a table constraint from its schema XML node to the catalog. LIMIT
 * constraints set the table's tuple limit (with an optional delete
 * statement); CHECK and FOREIGN KEY constraints are ignored with a warning;
 * PRIMARY KEY and UNIQUE constraints are wired to their backing index.
 */
private void addConstraintToCatalog(Table table, VoltXMLElement node,
        Map<String, String> indexReplacementMap, Map<String, Index> indexMap)
        throws VoltCompilerException {
    assert node.name.equals("constraint");
    String name = node.attributes.get("name");
    String typeName = node.attributes.get("constrainttype");
    ConstraintType type = ConstraintType.valueOf(typeName);
    String tableName = table.getTypeName();
    if (type == ConstraintType.LIMIT) {
        int tupleLimit = Integer.parseInt(node.attributes.get("rowslimit"));
        if (tupleLimit < 0) {
            throw m_compiler.new VoltCompilerException(
                    "Invalid constraint limit number '" + tupleLimit + "'");
        }
        // Only a single LIMIT constraint is allowed per table.
        if (tableLimitConstraintCounter.contains(tableName)) {
            throw m_compiler.new VoltCompilerException(
                    "Too many table limit constraints for table " + tableName);
        } else {
            tableLimitConstraintCounter.add(tableName);
        }
        table.setTuplelimit(tupleLimit);
        String deleteStmt = node.attributes.get("rowslimitdeletestmt");
        if (deleteStmt != null) {
            Statement catStmt = table.getTuplelimitdeletestmt().add("limit_delete");
            catStmt.setSqltext(deleteStmt);
            validateTupleLimitDeleteStmt(catStmt);
        }
        return;
    }
    if (type == ConstraintType.CHECK) {
        String msg = "VoltDB does not enforce check constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.FOREIGN_KEY) {
        String msg = "VoltDB does not enforce foreign key references and constraints. ";
        msg += "Constraint on table " + tableName + " will be ignored.";
        m_compiler.addWarn(msg);
        return;
    } else if (type == ConstraintType.MAIN) {
        // MAIN constraints are never expected to reach this code path.
        assert (false);
    } else if (type == ConstraintType.NOT_NULL) {
        // NOT NULL is handled as a column property, not a catalog constraint.
        return;
    } else if (type != ConstraintType.PRIMARY_KEY && type != ConstraintType.UNIQUE) {
        throw m_compiler.new VoltCompilerException("Invalid constraint type '" + typeName + "'");
    }
    // PRIMARY KEY / UNIQUE: attach the constraint to its (possibly replaced) index.
    Constraint catalog_const = table.getConstraints().add(name);
    String indexName = node.attributes.get("index");
    assert (indexName != null);
    if (indexReplacementMap.containsKey(indexName)) {
        indexName = indexReplacementMap.get(indexName);
    }
    Index catalog_index = indexMap.get(indexName);
    if (catalog_index != null) {
        catalog_const.setIndex(catalog_index);
        catalog_index.setUnique(true);
        boolean assumeUnique = Boolean.parseBoolean(node.attributes.get("assumeunique"));
        catalog_index.setAssumeunique(assumeUnique);
    }
    catalog_const.setType(type.getValue());
}
Add a constraint on a given table to the catalog
31,929
/**
 * Builds the AbstractExpression for a partial index predicate and verifies
 * it obeys the rules: it may only reference the indexed table and must be a
 * valid expression for indexes/materialized views.
 *
 * @throws VoltCompilerException with a descriptive message when a rule is violated
 */
private static AbstractExpression buildPartialIndexPredicate(AbstractParsedStmt dummy, String indexName,
        VoltXMLElement predicateXML, Table table, VoltCompiler compiler) throws VoltCompilerException {
    String tableName = table.getTypeName();
    assert (tableName != null);
    // StringBuffer (not StringBuilder): isValidExprForIndexesAndMVs appends
    // its own error text into this buffer.
    StringBuffer msg = new StringBuffer("Partial index \"" + indexName + "\" ");
    List<VoltXMLElement> columnRefs = predicateXML.findChildrenRecursively("columnref");
    for (VoltXMLElement columnRef : columnRefs) {
        String columnRefTableName = columnRef.attributes.get("table");
        if (columnRefTableName != null && !tableName.equals(columnRefTableName)) {
            msg.append("with expression(s) involving other tables is not supported.");
            throw compiler.new VoltCompilerException(msg.toString());
        }
    }
    AbstractExpression predicate = dummy.parseExpressionTree(predicateXML);
    if (!predicate.isValidExprForIndexesAndMVs(msg, false)) {
        throw compiler.new VoltCompilerException(msg.toString());
    }
    return predicate;
}
Build the abstract expression representing the partial index predicate . Verify it satisfies the rules . Throw error messages otherwise .
31,930
// Used for SUBSTRING on LOB values; this LobManager variant does not support
// the operation and always raises an unsupported-operation runtime error.
public Result getLob ( Session session , long lobID , long offset , long length ) { throw Error . runtimeError ( ErrorCode . U_S0500 , "LobManager" ) ; }
Used for SUBSTRING
31,931
/**
 * Releases this connection's database and JDBC resources immediately. The
 * closed flag is set before disposal so the connection reads as closed even
 * when pool disposal fails.
 */
public void close() throws SQLException {
    isClosed = true;
    try {
        JDBC4ClientConnectionPool.dispose(NativeConnection);
    } catch (Exception x) {
        throw SQLError.get(x);
    }
}
Releases this Connection object s database and JDBC resources immediately instead of waiting for them to be automatically released .
31,932
// Factory method for creating Array objects; not supported by this driver —
// always throws once the connection has been verified open.
public Array createArrayOf ( String typeName , Object [ ] elements ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Factory method for creating Array objects .
31,933
/**
 * Creates a Statement object for sending SQL statements to the database
 * over this connection.
 */
public Statement createStatement() throws SQLException {
    checkClosed();
    try {
        return new JDBC4Statement(this);
    } catch (Exception x) {
        // Normalize any construction failure into a SQLException.
        throw SQLError.get(x);
    }
}
Creates a Statement object for sending SQL statements to the database .
31,934
// Factory method for creating Struct objects; not supported by this driver —
// always throws once the connection has been verified open.
public Struct createStruct ( String typeName , Object [ ] attributes ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Factory method for creating Struct objects .
31,935
// Creates a PreparedStatement capable of returning the auto-generated keys
// designated by the given column index array; not supported by this driver —
// always throws once the connection has been verified open.
public PreparedStatement prepareStatement ( String sql , int [ ] columnIndexes ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Creates a default PreparedStatement object capable of returning the auto - generated keys designated by the given array .
31,936
/**
 * Creates a PreparedStatement for the given result set type and concurrency.
 * Only read-only, forward-only or scroll-insensitive result sets are
 * supported; any other combination is rejected.
 */
public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency)
        throws SQLException {
    boolean supportedType = resultSetType == ResultSet.TYPE_SCROLL_INSENSITIVE
            || resultSetType == ResultSet.TYPE_FORWARD_ONLY;
    if (supportedType && resultSetConcurrency == ResultSet.CONCUR_READ_ONLY) {
        return prepareStatement(sql);
    }
    checkClosed();
    throw SQLError.noSupport();
}
Creates a PreparedStatement object that will generate ResultSet objects with the given type and concurrency .
31,937
// Creates a PreparedStatement with the given type, concurrency and
// holdability; not supported by this driver — always throws once the
// connection has been verified open. (Unlike the 3-argument overload, this
// one does not delegate even for default type/concurrency values.)
public PreparedStatement prepareStatement ( String sql , int resultSetType , int resultSetConcurrency , int resultSetHoldability ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Creates a PreparedStatement object that will generate ResultSet objects with the given type concurrency and holdability .
31,938
/**
 * Rolls back the current transaction. VoltDB auto-commits, so this is
 * unsupported; the exception can be suppressed via the rollback connection
 * property, in which case the call is a silent no-op.
 */
public void rollback() throws SQLException {
    checkClosed();
    String throwOnRollback = props.getProperty(ROLLBACK_THROW_EXCEPTION, "true");
    if (throwOnRollback.equalsIgnoreCase("true")) {
        throw SQLError.noSupport();
    }
}
Undoes all changes made in the current transaction and releases any database locks currently held by this Connection object .
31,939
/**
 * Sets this connection's auto-commit mode. VoltDB always auto-commits, so
 * disabling it is unsupported unless the exception is suppressed via the
 * commit connection property.
 */
public void setAutoCommit(boolean autoCommit) throws SQLException {
    checkClosed();
    boolean throwOnManualCommit =
            props.getProperty(COMMIT_THROW_EXCEPTION, "true").equalsIgnoreCase("true");
    if (!autoCommit && throwOnManualCommit) {
        throw SQLError.noSupport();
    }
    this.autoCommit = autoCommit;
}
Sets this connection s auto - commit mode to the given state .
31,940
/**
 * Puts this connection in read-only mode (a driver optimization hint).
 * Rejected unless the "enableSetReadOnly" property is set; note the flag
 * itself is never stored, so the call is otherwise a no-op.
 */
public void setReadOnly(boolean readOnly) throws SQLException {
    checkClosed();
    boolean enabled = Boolean.parseBoolean(props.getProperty("enableSetReadOnly", "false"));
    if (!enabled) {
        throw SQLError.noSupport();
    }
}
Puts this connection in read - only mode as a hint to the driver to enable database optimizations .
31,941
// Installs the given type map for this Connection; not supported by this
// driver — always throws once the connection has been verified open.
public void setTypeMap ( Map < String , Class < ? > > map ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Installs the given TypeMap object as the type map for this Connection object .
31,942
// Saves the given client statistics to the named file by delegating to the
// underlying native client connection.
public void saveStatistics ( ClientStats stats , String file ) throws IOException { this . NativeConnection . saveStatistics ( stats , file ) ; }
Save statistics to a file
31,943
/**
 * Recursively deletes empty nodes upward from path until a non-empty parent
 * or the quota root is reached, pruning leftover quota tree elements.
 */
private static boolean trimProcQuotas(ZooKeeper zk, String path)
        throws KeeperException, IOException, InterruptedException {
    if (Quotas.quotaZookeeper.equals(path)) {
        // Reached the quota root: never delete it.
        return true;
    }
    List<String> children = zk.getChildren(path, false);
    if (!children.isEmpty()) {
        return true;
    }
    zk.delete(path, -1);
    String parent = path.substring(0, path.lastIndexOf('/'));
    return trimProcQuotas(zk, parent);
}
trim the quota tree by removing unwanted ( empty ) elements from the quota tree
31,944
/**
 * Deletes a quota (byte and/or node-count limits) for the given path. When
 * both limits are requested, the whole quota subtree for the path is removed
 * and empty ancestors are pruned; otherwise the corresponding limit is reset
 * to -1 in the stored stats node.
 */
public static boolean delQuota(ZooKeeper zk, String path, boolean bytes, boolean numNodes)
        throws KeeperException, IOException, InterruptedException {
    String parentPath = Quotas.quotaZookeeper + path;
    String quotaPath = Quotas.quotaZookeeper + path + "/" + Quotas.limitNode;
    if (zk.exists(quotaPath, false) == null) {
        System.out.println("Quota does not exist for " + path);
        return true;
    }
    byte[] data = null;
    try {
        data = zk.getData(quotaPath, false, new Stat());
    } catch (KeeperException.NoNodeException ne) {
        // Deleted between the exists() check above and this read.
        System.err.println("quota does not exist for " + path);
        return true;
    }
    // NOTE(review): byte[]<->String conversions below use the platform
    // default charset — presumably ASCII-safe stats text; confirm.
    StatsTrack strack = new StatsTrack(new String(data));
    if (bytes && !numNodes) {
        strack.setBytes(-1L);
        zk.setData(quotaPath, strack.toString().getBytes(), -1);
    } else if (!bytes && numNodes) {
        strack.setCount(-1);
        zk.setData(quotaPath, strack.toString().getBytes(), -1);
    } else if (bytes && numNodes) {
        // Both limits cleared: remove the quota nodes and prune ancestors.
        List<String> children = zk.getChildren(parentPath, false);
        for (String child : children) {
            zk.delete(parentPath + "/" + child, -1);
        }
        trimProcQuotas(zk, parentPath);
    }
    return true;
}
this method deletes quota for a node .
31,945
/**
 * Appends a " WHERE (pkc1 = ?) AND (pkc2 = ?) ..." clause for the primary
 * key columns (in index order) and returns the parameter offset of the
 * partition column, or -1 when the partition column is not part of the key.
 */
private static int generateCrudPKeyWhereClause(Column partitioncolumn, Constraint pkey, StringBuilder sb) {
    ArrayList<ColumnRef> indexColumns = new ArrayList<ColumnRef>(pkey.getIndex().getColumns().size());
    for (ColumnRef c : pkey.getIndex().getColumns()) {
        indexColumns.add(c);
    }
    Collections.sort(indexColumns, new ColumnRefComparator());
    sb.append(" WHERE ");
    int partitionOffset = -1;
    boolean first = true;
    for (ColumnRef pkc : indexColumns) {
        if (!first) {
            sb.append(" AND ");
        }
        first = false;
        sb.append("(" + pkc.getColumn().getName() + " = ?" + ")");
        if (pkc.getColumn() == partitioncolumn) {
            partitionOffset = pkc.getIndex();
        }
    }
    return partitionOffset;
}
Helper to generate a WHERE pkey_col1 = ? AND pkey_col2 = ? ... clause .
31,946
/**
 * Appends a "col1 = ?, col2 = ?, ..." assignment list for every column of
 * the table, in canonical column order.
 */
private static void generateCrudExpressionColumns(Table table, StringBuilder sb) {
    ArrayList<Column> tableColumns = new ArrayList<Column>(table.getColumns().size());
    for (Column c : table.getColumns()) {
        tableColumns.add(c);
    }
    Collections.sort(tableColumns, new TableColumnComparator());
    boolean first = true;
    for (Column c : tableColumns) {
        if (!first) {
            sb.append(", ");
        }
        first = false;
        sb.append(c.getName() + " = ?");
    }
}
Helper to generate a full col1 = ? col2 = ? ... clause .
31,947
/**
 * Starts the in-process server and blocks until it is ready to accept
 * connections.
 *
 * Fix: the original caught IOException from temp-file creation with
 * printStackTrace() followed by System.exit(-1) — killing the host JVM from
 * library code — and leaked the temporary deployment file. Now the failure is
 * surfaced as a RuntimeException with the cause attached, and the temp file is
 * registered for deletion on JVM exit.
 *
 * @return this server instance, for call chaining
 * @throws RuntimeException if the temporary deployment file cannot be created
 */
public InProcessVoltDBServer start() {
    DeploymentBuilder depBuilder = new DeploymentBuilder(sitesPerHost, 1, 0);
    depBuilder.setEnableCommandLogging(false);
    depBuilder.setUseDDLSchema(true);
    depBuilder.setHTTPDPort(8080);
    depBuilder.setJSONAPIEnabled(true);

    VoltDB.Configuration config = new VoltDB.Configuration();
    // Fall back to the default license location when none was configured.
    config.m_pathToLicense = (pathToLicense != null) ? pathToLicense : "./license.xml";

    File tempDeployment;
    try {
        tempDeployment = File.createTempFile("volt_deployment_", ".xml");
        tempDeployment.deleteOnExit(); // don't leave temp deployment files behind
    } catch (IOException e) {
        // Fail the caller instead of terminating the whole JVM.
        throw new RuntimeException("Unable to create temporary deployment file", e);
    }
    depBuilder.writeXML(tempDeployment.getAbsolutePath());
    config.m_pathToDeployment = tempDeployment.getAbsolutePath();

    server = new ServerThread(config);
    server.start();
    // Block until the server is ready for client connections.
    server.waitForInitialization();
    return this;
}
Starts the in - process server and blocks until it is ready to accept connections .
31,948
/**
 * Compiles the view's defining query expression and sets up its columns.
 *
 * @param session session used for parsing and schema resolution
 */
public void compile(Session session) {
    // The schema the view was originally compiled in may have been dropped;
    // fall back to the session's current schema in that case.
    if (!database.schemaManager.schemaExists(compileTimeSchema.name)) {
        compileTimeSchema = session.getSchemaHsqlName(null);
    }
    session.setSchema(compileTimeSchema.name);
    ParserDQL p = new ParserDQL(session, new Scanner(statement));
    p.read();
    viewSubQuery = p.XreadViewSubquery(this);
    queryExpression = viewSubQuery.queryExpression;
    if (getColumnCount() == 0) {
        if (columnNames == null) {
            // No explicit column list: take the names from the SELECT list.
            columnNames = viewSubQuery.queryExpression.getResultColumnNames();
        }
        // An explicit column list must match the SELECT list's arity.
        if (columnNames.length != viewSubQuery.queryExpression.getColumnCount()) {
            throw Error.error(ErrorCode.X_42593, tableName.statementName);
        }
        TableUtil.setColumnsInSchemaTable(this, columnNames, queryExpression.getColumnTypes());
    }
    // Adopt subqueries that are not already owned by another view.
    viewSubqueries = p.compileContext.getSubqueries();
    for (int i = 0; i < viewSubqueries.length; i++) {
        if (viewSubqueries[i].parentView == null) {
            viewSubqueries[i].parentView = this;
        }
    }
    viewSubQuery.getTable().view = this;
    viewSubQuery.getTable().columnList = columnList;
    schemaObjectNames = p.compileContext.getSchemaObjectNames();
    baseTable = queryExpression.getBaseTable();
    if (baseTable == null) {
        // No single base table: check conditions do not apply.
        return;
    }
    switch (check) {
        case SchemaObject.ViewCheckModes.CHECK_NONE :
            break;
        case SchemaObject.ViewCheckModes.CHECK_LOCAL :
            checkExpression = queryExpression.getCheckCondition();
            break;
        case SchemaObject.ViewCheckModes.CHECK_CASCADE :
            break;
        default :
            throw Error.runtimeError(ErrorCode.U_S0500, "View");
    }
}
Compiles the query expression and sets up the columns .
31,949
/**
 * Loads a catalog from raw jar bytes, upgrading it if it was built by an
 * older VoltDB version.
 *
 * @param catalogBytes serialized catalog jar contents
 * @param isXDCR       whether the cluster runs cross-datacenter replication
 * @return pair of the (possibly upgraded) jarfile and the version upgraded from
 * @throws IOException if the bytes do not contain a valid catalog jar
 */
public static Pair<InMemoryJarfile, String> loadAndUpgradeCatalogFromJar(byte[] catalogBytes, boolean isXDCR) throws IOException {
    // Materialize the bytes, then delegate to the jarfile-based overload.
    return loadAndUpgradeCatalogFromJar(loadInMemoryJarFile(catalogBytes), isXDCR);
}
Load a catalog from the jar bytes .
31,950
/**
 * Upgrades the given catalog jarfile in place if it was built by an older
 * VoltDB version.
 *
 * @param jarfile catalog jarfile to (possibly) upgrade
 * @param isXDCR  whether the cluster runs cross-datacenter replication
 * @return pair of the jarfile and the version string it was upgraded from
 * @throws IOException on upgrade failure
 */
public static Pair<InMemoryJarfile, String> loadAndUpgradeCatalogFromJar(InMemoryJarfile jarfile, boolean isXDCR) throws IOException {
    final VoltCompiler compiler = new VoltCompiler(isXDCR);
    final String versionUpgradedFrom = compiler.upgradeCatalogAsNeeded(jarfile);
    return new Pair<>(jarfile, versionUpgradedFrom);
}
Load a catalog from the InMemoryJarfile .
31,951
/**
 * Extracts the serialized catalog commands from an in-memory jarfile as a
 * UTF-8 string.
 *
 * @param jarfile jarfile containing the catalog command file
 * @return the catalog commands as text
 */
public static String getSerializedCatalogStringFromJar(InMemoryJarfile jarfile) {
    final byte[] rawCatalog = jarfile.get(CatalogUtil.CATALOG_FILENAME);
    return new String(rawCatalog, Constants.UTF8ENCODING);
}
Convenience method to extract the catalog commands from an InMemoryJarfile as a string
31,952
/**
 * Reads the catalog build-info file from the jar and performs sanity checks
 * on it. Line 0 holds the VoltDB version the catalog was built with.
 *
 * @param jarfile catalog jarfile
 * @return the build-info lines, each trimmed of surrounding whitespace
 * @throws IOException if the file is missing or its version string is invalid
 */
public static String[] getBuildInfoFromJar(InMemoryJarfile jarfile) throws IOException {
    final byte[] raw = jarfile.get(CATALOG_BUILDINFO_FILENAME);
    if (raw == null) {
        throw new IOException("Catalog build information not found - please build your application using the current version of VoltDB.");
    }
    final String[] lines = new String(raw, Constants.UTF8ENCODING).split("\n");
    if (lines.length < 1) {
        throw new IOException("Catalog build info has no version string.");
    }
    // Normalize every line before validating the version on line 0.
    for (int i = 0; i < lines.length; ++i) {
        lines[i] = lines[i].trim();
    }
    final String versionFromCatalog = lines[0];
    if (!CatalogUtil.isCatalogVersionValid(versionFromCatalog)) {
        throw new IOException(String.format("Catalog build info version (%s) is bad.", versionFromCatalog));
    }
    return lines;
}
Get the catalog build info from the jar bytes . Performs sanity checks on the build info and version strings .
31,953
/**
 * Reads the auto-generated DDL file out of the catalog jar.
 *
 * @param jarfile catalog jarfile
 * @return the DDL text, stripped of surrounding whitespace
 * @throws IOException if the auto-generated DDL entry is missing
 */
public static String getAutoGenDDLFromJar(InMemoryJarfile jarfile) throws IOException {
    final byte[] rawDdl = jarfile.get(VoltCompiler.AUTOGEN_DDL_FILE_NAME);
    if (rawDdl == null) {
        throw new IOException("Auto generated schema DDL not found - please make sure the database is initialized with valid schema.");
    }
    return new String(rawDdl, StandardCharsets.UTF_8).trim();
}
Get the auto generated DDL from the catalog jar .
31,954
/**
 * Returns a copy of the catalog jar with the default VoltDB artifact files
 * removed, leaving only entries such as dependencies needed by generated
 * stored procedures. The input jarfile is not modified.
 *
 * @param jarfile source catalog jarfile
 * @return a deep copy stripped of the default artifacts
 */
public static InMemoryJarfile getCatalogJarWithoutDefaultArtifacts(final InMemoryJarfile jarfile) {
    final InMemoryJarfile stripped = jarfile.deepCopy();
    for (String artifact : CATALOG_DEFAULT_ARTIFACTS) {
        stripped.remove(artifact);
    }
    return stripped;
}
Removes the default VoltDB artifact files from the catalog and returns the resultant jar file, which will still contain the dependency files needed for generated stored procedures.
31,955
/**
 * Builds an in-memory jarfile from catalog jar bytes and verifies that it
 * actually contains a catalog entry.
 *
 * @param catalogBytes serialized jar contents; must not be null
 * @return the parsed in-memory jarfile
 * @throws IOException if the jar has no catalog entry or cannot be parsed
 */
public static InMemoryJarfile loadInMemoryJarFile(byte[] catalogBytes) throws IOException {
    assert (catalogBytes != null);
    final InMemoryJarfile jar = new InMemoryJarfile(catalogBytes);
    if (!jar.containsKey(CATALOG_FILENAME)) {
        throw new IOException("Database catalog not found - please build your application using the current version of VoltDB.");
    }
    return jar;
}
Load an in - memory catalog jar file from jar bytes .
31,956
/**
 * Tests whether a table is a view over a persistent (non-stream) table that
 * should be included in snapshots.
 *
 * @param db    database the table belongs to
 * @param table candidate table
 * @return true if the table is a persistent-table view that is either
 *         replicated or has an explicit partition column
 */
public static boolean isSnapshotablePersistentTableView(Database db, Table table) {
    final Table source = table.getMaterializer();
    // Must be a view, and its source must not be an export/stream table.
    if (source == null || CatalogUtil.isTableExportOnly(db, source)) {
        return false;
    }
    // A partitioned view needs an explicit partition column to qualify.
    return table.getIsreplicated() || table.getPartitioncolumn() != null;
}
Test if a table is a persistent table view and should be included in the snapshot .
31,957
/**
 * Tests whether a table is a view over a streamed (export) table that should
 * be included in snapshots.
 *
 * @param db    database the table belongs to
 * @param table candidate table
 * @return true if the table is a stream view whose source has a partition
 *         column that the view retains (matched by column name)
 */
public static boolean isSnapshotableStreamedTableView(Database db, Table table) {
    final Table source = table.getMaterializer();
    // Must be a view, and its source must be an export/stream table.
    if (source == null || !CatalogUtil.isTableExportOnly(db, source)) {
        return false;
    }
    final Column sourcePartitionColumn = source.getPartitioncolumn();
    if (sourcePartitionColumn == null) {
        return false;
    }
    // The view must carry a column with the same name as the source's
    // partition column.
    return table.getColumns().get(sourcePartitionColumn.getName()) != null;
}
Test if a table is a streamed table view and should be included in the snapshot .
31,958
/**
 * Computes a unique id for a plan fragment by packing the relative indices of
 * its grandparent (bits 32+), parent (bits 16-31), and itself (low bits) into
 * one long.
 *
 * @param frag the plan fragment
 * @return the packed unique id
 */
public static long getUniqueIdForFragment(PlanFragment frag) {
    final CatalogType parent = frag.getParent();
    return (((long) parent.getParent().getRelativeIndex()) << 32)
            + (((long) parent.getRelativeIndex()) << 16)
            + frag.getRelativeIndex();
}
Get a unique id for a plan fragment by munging the indices of it s parents and grandparents in the catalog .
31,959
/**
 * Returns the items of a catalog map sorted by the value of the named field
 * (e.g. a table's columns sorted by "index"). Asserts if the field does not
 * exist on the items.
 *
 * @param items         catalog items to sort; must not be null
 * @param sortFieldName name of the field to sort by; must not be null
 * @return a new list of the items in ascending field-value order
 */
public static <T extends CatalogType> List<T> getSortedCatalogItems(CatalogMap<T> items, String sortFieldName) {
    assert (items != null);
    assert (sortFieldName != null);
    // TreeMap keyed on the field value yields the items in sorted order.
    final TreeMap<Object, T> sorted = new TreeMap<>();
    boolean fieldSeen = false;
    for (T item : items) {
        if (!fieldSeen) {
            fieldSeen = ArrayUtils.contains(item.getFields(), sortFieldName);
        }
        // Trip an assertion if the requested field is absent.
        assert (fieldSeen);
        sorted.put(item.getField(sortFieldName), item);
    }
    return new ArrayList<>(sorted.values());
}
Given a set of catalog items, return a sorted list of them, ordered by the value of a field specified by name. If the field doesn't exist, trip an assertion. This is primarily used to sort a table's columns or a procedure's parameters.
31,960
/**
 * Variant of {@code getSortedCatalogItems} that appends the sorted items to a
 * caller-supplied list instead of returning a new one.
 *
 * @param items         catalog items to sort
 * @param sortFieldName name of the field to sort by
 * @param result        list the sorted items are appended to
 */
public static <T extends CatalogType> void getSortedCatalogItems(CatalogMap<T> items, String sortFieldName, List<T> result) {
    for (T item : getSortedCatalogItems(items, sortFieldName)) {
        result.add(item);
    }
}
A getSortedCatalogItems variant with the result list filled in - place
31,961
/**
 * Returns the Index catalog object backing the table's PRIMARY KEY
 * constraint.
 *
 * @param catalogTable table to inspect
 * @return the primary-key index
 * @throws Exception if the table has no PRIMARY KEY constraint
 */
public static Index getPrimaryKeyIndex(Table catalogTable) throws Exception {
    for (Constraint constraint : catalogTable.getConstraints()) {
        if (constraint.getType() == ConstraintType.PRIMARY_KEY.getValue()) {
            return constraint.getIndex();
        }
    }
    throw new Exception("ERROR: Table '" + catalogTable.getTypeName() + "' does not have a PRIMARY KEY constraint");
}
For a given Table catalog object return the PrimaryKey Index catalog object
31,962
/**
 * Returns all primary-key columns of a table in key order. If the table has
 * no primary key, the returned collection is empty.
 *
 * @param catalogTable table to inspect
 * @return the primary-key columns, possibly empty
 */
public static Collection<Column> getPrimaryKeyColumns(Table catalogTable) {
    final Collection<Column> columns = new ArrayList<>();
    final Index pkeyIndex;
    try {
        pkeyIndex = CatalogUtil.getPrimaryKeyIndex(catalogTable);
    } catch (Exception ex) {
        // No primary key: hand back the empty collection.
        return columns;
    }
    assert (pkeyIndex != null);
    // "index" is the field that records each column's position in the key.
    for (ColumnRef ref : getSortedCatalogItems(pkeyIndex.getColumns(), "index")) {
        columns.add(ref.getColumn());
    }
    return columns;
}
Return all of the primary key columns for a particular table. If the table does not have a primary key, the returned list will be empty.
31,963
/**
 * Returns true if a table is a stream (export-only table). Mirrors the logic
 * in CatalogUtil.h.
 *
 * @param database database the table belongs to
 * @param table    candidate table
 * @return true if the table is a stream
 */
public static boolean isTableExportOnly(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) {
    final int type = table.getTabletype();
    if (!TableType.isInvalidType(type)) {
        // A valid table type is authoritative.
        return TableType.isStream(type);
    }
    // Legacy path: the table type is unset, so scan the export connectors
    // for a reference to this table.
    final int tableIndex = table.getRelativeIndex();
    for (Connector connector : database.getConnectors()) {
        for (ConnectorTableInfo info : connector.getTableinfo()) {
            if (info.getTable().getRelativeIndex() == tableIndex) {
                return true;
            }
        }
    }
    return false;
}
Return true if a table is a stream This function is duplicated in CatalogUtil . h
31,964
/**
 * Returns true if the given table is the source table of at least one
 * materialized view in the database.
 *
 * @param database database to scan
 * @param table    candidate source table
 * @return true if any table materializes from it
 */
public static boolean isTableMaterializeViewSource(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) {
    final int tableIndex = table.getRelativeIndex();
    for (Table candidate : database.getTables()) {
        final Table source = candidate.getMaterializer();
        if (source != null && source.getRelativeIndex() == tableIndex) {
            return true;
        }
    }
    return false;
}
Return true if a table is the source table for a materialized view .
31,965
/**
 * Returns the list of materialized views whose source is the given table.
 *
 * @param database database to scan
 * @param table    source table
 * @return the views materialized from the table, possibly empty
 */
public static List<Table> getMaterializeViews(org.voltdb.catalog.Database database, org.voltdb.catalog.Table table) {
    final ArrayList<Table> views = new ArrayList<>();
    final int tableIndex = table.getRelativeIndex();
    for (Table candidate : database.getTables()) {
        final Table source = candidate.getMaterializer();
        if (source != null && source.getRelativeIndex() == tableIndex) {
            views.add(candidate);
        }
    }
    return views;
}
Return the list of materialized views for a table.
31,966
/**
 * Checks whether a catalog compiled with the given VoltDB version is
 * compatible with the running version. Compatibility requires an exact
 * version-string match.
 *
 * @param catalogVersionStr version string from the catalog build info
 * @return true if the catalog version equals the running version
 * @throws IllegalArgumentException if the string is non-empty but unparseable
 */
public static boolean isCatalogCompatible(String catalogVersionStr) {
    if (catalogVersionStr == null || catalogVersionStr.isEmpty()) {
        return false;
    }
    // Parse purely to reject malformed version strings up front.
    if (MiscUtils.parseVersionString(catalogVersionStr) == null) {
        throw new IllegalArgumentException("Invalid version string " + catalogVersionStr);
    }
    return catalogVersionStr.equals(VoltDB.instance().getVersionString());
}
Check if a catalog compiled with the given version of VoltDB is compatible with the current version of VoltDB .
31,967
/**
 * Checks whether a catalog version string is well-formed: non-null,
 * non-empty, and parseable by the standard version parser.
 *
 * @param catalogVersionStr candidate version string
 * @return true if the string is a valid version
 */
public static boolean isCatalogVersionValid(String catalogVersionStr) {
    return catalogVersionStr != null
            && !catalogVersionStr.isEmpty()
            && MiscUtils.parseVersionString(catalogVersionStr) != null;
}
Check if a catalog version string is valid .
31,968
/**
 * Parses the deployment configuration and folds its settings into the
 * catalog.
 *
 * @param catalog              catalog to populate
 * @param deployment           parsed deployment.xml contents
 * @param isPlaceHolderCatalog when true, skips settings (users, export,
 *                             import, SNMP) that only apply to a real catalog
 * @return null on success, or a human-readable error message on failure
 */
public static String compileDeployment(Catalog catalog, DeploymentType deployment, boolean isPlaceHolderCatalog) {
    String errmsg = null;
    try {
        validateDeployment(catalog, deployment);
        // Ensure the singleton deployment entry exists before writing to it.
        if (catalog.getClusters().get("cluster").getDeployment().get("deployment") == null) {
            catalog.getClusters().get("cluster").getDeployment().add("deployment");
        }
        setClusterInfo(catalog, deployment);
        setSnapshotInfo(catalog, deployment.getSnapshot());
        setSecurityEnabled(catalog, deployment.getSecurity());
        if (!isPlaceHolderCatalog) {
            setUsersInfo(catalog, deployment.getUsers());
        }
        setHTTPDInfo(catalog, deployment.getHttpd(), deployment.getSsl());
        setDrInfo(catalog, deployment.getDr(), deployment.getCluster(), isPlaceHolderCatalog);
        if (!isPlaceHolderCatalog) {
            setExportInfo(catalog, deployment.getExport());
            setImportInfo(catalog, deployment.getImport());
            setSnmpInfo(deployment.getSnmp());
        }
        setCommandLogInfo(catalog, deployment.getCommandlog());
        // Resolve legacy path properties before materializing directories.
        VoltDB.instance().loadLegacyPathProperties(deployment);
        setupPaths(deployment.getPaths());
        validateResourceMonitorInfo(deployment);
    } catch (Exception e) {
        // Any validation/setup failure is reported as a message, not rethrown.
        errmsg = "Error validating deployment configuration: " + e.getMessage();
        hostLog.error(errmsg);
        return errmsg;
    }
    return null;
}
Parse the deployment . xml file and add its data into the catalog .
31,969
/**
 * Parses the deployment XML addressed by a URL, falling back to treating the
 * argument as a local file path when it is not a well-formed URL.
 *
 * @param deploymentURL URL or file path of the deployment file
 * @return the parsed deployment, or null if it could not be located
 */
public static DeploymentType parseDeployment(String deploymentURL) {
    InputStream deployIS = null;
    try {
        deployIS = new URL(deploymentURL).openStream();
    } catch (MalformedURLException ex) {
        // Not a URL: try it as a plain filesystem path.
        try {
            deployIS = new FileInputStream(deploymentURL);
        } catch (FileNotFoundException e) {
            deployIS = null;
        }
    } catch (IOException ioex) {
        deployIS = null;
    }
    if (deployIS == null) {
        hostLog.error("Could not locate deployment info at given URL: " + deploymentURL);
        return null;
    }
    hostLog.info("URL of deployment info: " + deploymentURL);
    return getDeployment(deployIS);
}
Parses the deployment XML file .
31,970
/**
 * Parses deployment XML supplied directly as a string.
 *
 * @param deploymentString deployment XML text
 * @return the parsed deployment
 */
public static DeploymentType parseDeploymentFromString(String deploymentString) {
    // Wrap the UTF-8 bytes of the XML text in a stream and hand off to the
    // common parsing path.
    final byte[] xmlBytes = deploymentString.getBytes(Constants.UTF8ENCODING);
    return getDeployment(new ByteArrayInputStream(xmlBytes));
}
Parses the deployment XML string .
31,971
/**
 * Serializes a deployment object back to schema-validated XML.
 *
 * @param deployment deployment object to marshal
 * @param indent     true to pretty-print the output
 * @return the XML text, or null when marshalling fails for a reportable
 *         reason (missing file or schema-validation error)
 * @throws IOException declared for callers; not thrown directly in this body
 * @throws RuntimeException if the JAXB context/schema is unavailable or an
 *         unexpected JAXB failure occurs
 */
public static String getDeployment(DeploymentType deployment, boolean indent) throws IOException {
    try {
        // m_jc / m_schema are initialized elsewhere; without them we cannot
        // produce schema-validated output.
        if (m_jc == null || m_schema == null) {
            throw new RuntimeException("Error schema validation.");
        }
        Marshaller marshaller = m_jc.createMarshaller();
        marshaller.setSchema(m_schema);
        marshaller.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.valueOf(indent));
        StringWriter sw = new StringWriter();
        // DeploymentType has no @XmlRootElement, so wrap it explicitly.
        marshaller.marshal(new JAXBElement<>(new QName("", "deployment"), DeploymentType.class, deployment), sw);
        return sw.toString();
    } catch (JAXBException e) {
        // Distinguish reportable causes (log + null) from unexpected ones.
        if (e.getLinkedException() instanceof java.io.FileNotFoundException) {
            hostLog.error(e.getLinkedException().getMessage());
            return null;
        } else if (e.getLinkedException() instanceof org.xml.sax.SAXParseException) {
            hostLog.error("Error schema validating deployment.xml file. " + e.getLinkedException().getMessage());
            return null;
        } else {
            throw new RuntimeException(e);
        }
    }
}
Given the deployment object generate the XML
31,972
/**
 * Validates VoltDB-specific deployment requirements (beyond XML schema
 * correctness): when security is enabled, at least one user must hold the
 * built-in administrator role.
 *
 * @param catalog    catalog being compiled (unused here; kept for signature parity)
 * @param deployment parsed deployment to validate
 * @throws RuntimeException if security is enabled without an administrator
 */
private static void validateDeployment(Catalog catalog, DeploymentType deployment) {
    if (deployment.getSecurity() == null || !deployment.getSecurity().isEnabled()) {
        return;
    }
    final String msg = "Cannot enable security without defining at least one user in the built-in ADMINISTRATOR role in the deployment file.";
    if (deployment.getUsers() == null) {
        throw new RuntimeException(msg);
    }
    for (UsersType.User user : deployment.getUsers().getUser()) {
        if (user.getRoles() == null) {
            continue;
        }
        for (String role : extractUserRoles(user)) {
            if (role.equalsIgnoreCase(ADMIN)) {
                // Found an administrator; deployment passes.
                return;
            }
        }
    }
    throw new RuntimeException(msg);
}
Validate the contents of the deployment . xml file . This is for validating VoltDB requirements not XML schema correctness
31,973
/**
 * Copies cluster-level deployment settings (k-factor, partition detection,
 * system settings, heartbeat timeout, schema-change mode) into the catalog.
 *
 * @param catalog    catalog to update
 * @param deployment parsed deployment providing the settings
 */
private static void setClusterInfo(Catalog catalog, DeploymentType deployment) {
    ClusterType cluster = deployment.getCluster();
    int kFactor = cluster.getKfactor();
    Cluster catCluster = catalog.getClusters().get("cluster");
    Deployment catDeploy = catCluster.getDeployment().get("deployment");
    catDeploy.setKfactor(kFactor);
    if (deployment.getPartitionDetection().isEnabled()) {
        catCluster.setNetworkpartition(true);
    } else {
        catCluster.setNetworkpartition(false);
    }
    setSystemSettings(deployment, catDeploy);
    catCluster.setHeartbeattimeout(deployment.getHeartbeat().getTimeout());
    if (cluster.getSchema() != null) {
        // DDL mode means schema is managed via AdHoc DDL rather than catalog updates.
        catCluster.setUseddlschema(cluster.getSchema() == SchemaType.DDL);
    } else {
        hostLog.warn("Schema modification setting not found. " + "Forcing default behavior of UpdateCatalog to modify database schema.");
        catCluster.setUseddlschema(false);
    }
}
Set cluster info in the catalog .
31,974
/**
 * Applies deployment-time import settings: builds the processor
 * configuration for every enabled import connector and cross-validates the
 * Kafka connectors for group/topic collisions.
 *
 * Fix: removed the dead local {@code streamList}, which was populated from
 * each configuration's module name but never read afterwards.
 *
 * @param catalog    catalog being compiled (unused here; kept for signature parity)
 * @param importType import section of the deployment, may be null
 */
private static void setImportInfo(Catalog catalog, ImportType importType) {
    if (importType == null) {
        return;
    }
    List<ImportConfigurationType> kafkaConfigs = new ArrayList<>();
    for (ImportConfigurationType importConfiguration : importType.getConfiguration()) {
        if (!importConfiguration.isEnabled()) {
            continue;
        }
        if (importConfiguration.getType().equals(ServerImportEnum.KAFKA)) {
            kafkaConfigs.add(importConfiguration);
        }
        // Invoked for its validation side effects; the result is not needed here.
        buildImportProcessorConfiguration(importConfiguration, true);
    }
    validateKafkaConfig(kafkaConfigs);
}
Set deployment time settings for import
31,975
/**
 * Rejects Kafka import configurations in which two entries share both a
 * consumer group id and at least one topic.
 *
 * @param configs enabled Kafka import configurations
 * @throws RuntimeException on a duplicate groupid/topic combination
 */
private static void validateKafkaConfig(List<ImportConfigurationType> configs) {
    if (configs.isEmpty()) {
        return;
    }
    // Accumulate, per group id, the set of topics already claimed.
    final HashMap<String, HashSet<String>> topicsByGroup = new HashMap<>();
    for (ImportConfigurationType config : configs) {
        String groupid = "";
        final HashSet<String> topics = new HashSet<>();
        for (PropertyType property : config.getProperty()) {
            final String name = property.getName();
            if (name.equals("topics")) {
                topics.addAll(Arrays.asList(property.getValue().split("\\s*,\\s*")));
            } else if (name.equals("groupid")) {
                groupid = property.getValue();
            }
        }
        final HashSet<String> claimed = topicsByGroup.get(groupid);
        if (claimed == null) {
            topicsByGroup.put(groupid, topics);
        } else {
            final HashSet<String> union = new HashSet<>(claimed);
            union.addAll(topics);
            // If the union is smaller than the sum of sizes, a topic repeats.
            if (union.size() != topics.size() + claimed.size()) {
                throw new RuntimeException("Invalid import configuration. Two Kafka entries have the same groupid and topic.");
            }
            topicsByGroup.put(groupid, union);
        }
    }
}
Check whether two Kafka configurations have both the same topic and group id . If two configurations have the same group id and overlapping sets of topics a RuntimeException will be thrown .
31,976
/**
 * Validates the SNMP section of the deployment.
 *
 * Fix: the error messages previously read "must be > 8 characters", which
 * contradicted the actual checks ({@code length() < 8}, i.e. a minimum of 8
 * characters is required and exactly 8 is accepted). The messages now state
 * the real constraint; the checks themselves are unchanged.
 *
 * @param snmpType SNMP configuration, may be null or disabled
 * @throws IllegalArgumentException if the target is missing or a key is too short
 */
private static void setSnmpInfo(SnmpType snmpType) {
    if (snmpType == null || !snmpType.isEnabled()) {
        return;
    }
    if (snmpType.getTarget() == null || snmpType.getTarget().trim().length() == 0) {
        throw new IllegalArgumentException("Target must be specified for SNMP configuration.");
    }
    // SNMPv3 keys must be at least 8 characters long.
    if (snmpType.getAuthkey() != null && snmpType.getAuthkey().length() < 8) {
        throw new IllegalArgumentException("SNMP Authkey must be at least 8 characters.");
    }
    if (snmpType.getPrivacykey() != null && snmpType.getPrivacykey().length() < 8) {
        throw new IllegalArgumentException("SNMP Privacy Key must be at least 8 characters.");
    }
}
Validate Snmp Configuration .
31,977
/**
 * Aggregates Kafka10 importer configurations: one importer per
 * brokers-and-group pair. Kafka10 configs are pulled out of
 * {@code processorConfig}, merged by "brokers_groupid" key, and the merged
 * entries are put back. Formatters and stored procedures can vary by topic.
 *
 * @param processorConfig map of importer configurations, modified in place
 */
private static void mergeKafka10ImportConfigurations(Map<String, ImportConfiguration> processorConfig) {
    if (processorConfig.isEmpty()) {
        return;
    }
    // Pass 1: find the Kafka10 configs and pull them out of the main map.
    Map<String, ImportConfiguration> kafka10ProcessorConfigs = new HashMap<>();
    Iterator<Map.Entry<String, ImportConfiguration>> iter = processorConfig.entrySet().iterator();
    while (iter.hasNext()) {
        String configName = iter.next().getKey();
        ImportConfiguration importConfig = processorConfig.get(configName);
        Properties properties = importConfig.getmoduleProperties();
        String importBundleJar = properties.getProperty(ImportDataProcessor.IMPORT_MODULE);
        Preconditions.checkNotNull(importBundleJar, "Import source is undefined or custom import plugin class missing.");
        // The bundle jar name encodes the Kafka version, e.g. "...kafkastream10.jar".
        String[] bundleJar = importBundleJar.split("kafkastream");
        if (bundleJar.length > 1) {
            String version = bundleJar[1].substring(0, bundleJar[1].indexOf(".jar"));
            if (!version.isEmpty()) {
                int versionNumber = Integer.parseInt(version);
                if (versionNumber == 10) {
                    kafka10ProcessorConfigs.put(configName, importConfig);
                    // Remove via the iterator to avoid ConcurrentModificationException.
                    iter.remove();
                }
            }
        }
    }
    if (kafka10ProcessorConfigs.isEmpty()) {
        return;
    }
    // Pass 2: merge configs that share a broker list and consumer group.
    Map<String, ImportConfiguration> mergedConfigs = new HashMap<>();
    iter = kafka10ProcessorConfigs.entrySet().iterator();
    while (iter.hasNext()) {
        ImportConfiguration importConfig = iter.next().getValue();
        Properties props = importConfig.getmoduleProperties();
        String brokers = props.getProperty("brokers");
        String groupid = props.getProperty("groupid", "voltdb");
        if (brokers == null) {
            // Fall back to the legacy "zookeeper" property for the broker list.
            brokers = props.getProperty("zookeeper");
        }
        String brokersGroup = brokers + "_" + groupid;
        ImportConfiguration config = mergedConfigs.get(brokersGroup);
        if (config == null) {
            mergedConfigs.put(brokersGroup, importConfig);
        } else {
            config.mergeProperties(props);
        }
    }
    // Re-insert the merged Kafka10 configs alongside the untouched ones.
    processorConfig.putAll(mergedConfigs);
}
Aggregates Kafka10 importer configurations: one importer per brokers-and-Kafka-group pair. Formatters and stored procedures can vary by topic.
31,978
/**
 * Copies the deployment's security settings (enabled flag and auth provider)
 * into the catalog.
 *
 * @param catalog  catalog to update
 * @param security security section of the deployment
 */
private static void setSecurityEnabled(Catalog catalog, SecurityType security) {
    final Cluster cluster = catalog.getClusters().get("cluster");
    final Database database = cluster.getDatabases().get("database");
    cluster.setSecurityenabled(security.isEnabled());
    database.setSecurityprovider(security.getProvider().value());
}
Set the security setting in the catalog from the deployment file
31,979
/**
 * Applies the auto-snapshot deployment settings to the catalog, sanitizing
 * invalid values instead of failing: bad frequency falls back to "10m", an
 * empty prefix becomes "SNAPSHOTNONCE", '-' and ',' in the prefix are
 * replaced with '_', and retain is forced to at least 1. Each fallback is
 * logged as an error.
 *
 * @param catalog          catalog to update
 * @param snapshotSettings snapshot section of the deployment
 */
private static void setSnapshotInfo(Catalog catalog, SnapshotType snapshotSettings) {
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    SnapshotSchedule schedule = db.getSnapshotschedule().get("default");
    if (schedule == null) {
        schedule = db.getSnapshotschedule().add("default");
    }
    schedule.setEnabled(snapshotSettings.isEnabled());
    // Frequency must end with a unit suffix: s(econds), m(inutes), h(ours).
    String frequency = snapshotSettings.getFrequency();
    if (!frequency.endsWith("s") && !frequency.endsWith("m") && !frequency.endsWith("h")) {
        hostLog.error("Snapshot frequency " + frequency + " needs to end with time unit specified" + " that is one of [s, m, h] (seconds, minutes, hours)" + " Defaulting snapshot frequency to 10m.");
        frequency = "10m";
    }
    int frequencyInt = 0;
    String frequencySubstring = frequency.substring(0, frequency.length() - 1);
    try {
        frequencyInt = Integer.parseInt(frequencySubstring);
    } catch (Exception e) {
        hostLog.error("Frequency " + frequencySubstring + " is not an integer. Defaulting frequency to 10m.");
        frequency = "10m";
        frequencyInt = 10;
    }
    String prefix = snapshotSettings.getPrefix();
    if (prefix == null || prefix.isEmpty()) {
        hostLog.error("Snapshot prefix " + prefix + " is not a valid prefix. Using prefix of 'SNAPSHOTNONCE' ");
        prefix = "SNAPSHOTNONCE";
    }
    // '-' and ',' are reserved in snapshot nonces; rewrite them to '_'.
    if (prefix.contains("-") || prefix.contains(",")) {
        String oldprefix = prefix;
        prefix = prefix.replaceAll("-", "_");
        prefix = prefix.replaceAll(",", "_");
        hostLog.error("Snapshot prefix " + oldprefix + " cannot include , or -." + " Using the prefix: " + prefix + " instead.");
    }
    int retain = snapshotSettings.getRetain();
    if (retain < 1) {
        hostLog.error("Snapshot retain value " + retain + " is not a valid value. Must be 1 or greater." + " Defaulting snapshot retain to 1.");
        retain = 1;
    }
    // The unit is the final character of the (possibly defaulted) frequency.
    schedule.setFrequencyunit(frequency.substring(frequency.length() - 1, frequency.length()));
    schedule.setFrequencyvalue(frequencyInt);
    schedule.setPrefix(prefix);
    schedule.setRetain(retain);
}
Set the auto - snapshot settings in the catalog from the deployment file
31,980
/**
 * Resolves voltdbroot, then sets up every feature directory (snapshots,
 * export overflow, command log and its snapshots, DR overflow, large-query
 * swap) relative to it.
 *
 * @param paths paths section of the deployment
 */
private static void setupPaths(PathsType paths) {
    final File voltDbRoot = getVoltDbRoot(paths);
    setupSnapshotPaths(paths.getSnapshots(), voltDbRoot);
    setupExportOverflow(paths.getExportoverflow(), voltDbRoot);
    setupCommandLog(paths.getCommandlog(), voltDbRoot);
    setupCommandLogSnapshot(paths.getCommandlogsnapshot(), voltDbRoot);
    setupDROverflow(paths.getDroverflow(), voltDbRoot);
    setupLargeQuerySwap(paths.getLargequeryswap(), voltDbRoot);
}
Set voltroot path and set the path overrides for export overflow partition etc .
31,981
/**
 * Returns a File for voltdbroot, creating the directory if it is missing.
 * The override from {@code paths} is used when present; otherwise the
 * default root location is used.
 *
 * Fix: the two branches of the original duplicated the identical
 * exists/mkdirs/logging block, and the override path was resolved twice
 * (once in the condition, once in the branch body). The resolution now
 * happens once and one creation block handles both cases.
 *
 * @param paths paths section of the deployment, may be null
 * @return the voltdbroot directory (validated)
 */
public static File getVoltDbRoot(PathsType paths) {
    // Resolve the configured override, if any; null means "use the default".
    String overridePath = null;
    if (paths != null && paths.getVoltdbroot() != null) {
        overridePath = VoltDB.instance().getVoltDBRootPath(paths.getVoltdbroot());
    }
    final File voltDbRoot = (overridePath == null)
            ? new VoltFile(VoltDB.DBROOT)
            : new VoltFile(overridePath);
    if (!voltDbRoot.exists()) {
        hostLog.info("Creating voltdbroot directory: " + voltDbRoot.getAbsolutePath());
        if (!voltDbRoot.mkdirs()) {
            hostLog.fatal("Failed to create voltdbroot directory \"" + voltDbRoot.getAbsolutePath() + "\"");
        }
    }
    validateDirectory("volt root", voltDbRoot);
    return voltDbRoot;
}
Get a File object representing voltdbroot, creating the directory if it is missing. If paths is non-null, it is used to override the default location.
31,982
/**
 * Populates the catalog's user accounts from the deployment file. Passwords
 * arrive either in plaintext (hashed here with SHA-1 and SHA-256) or
 * pre-masked as a 104-character concatenation of the two hex digests; both
 * digests are then bcrypt-salted before storage. Each user is linked to the
 * catalog groups matching their declared roles.
 *
 * @param catalog catalog to update
 * @param users   users section of the deployment, may be null
 * @throws RuntimeException if a masked password has an invalid length
 */
private static void setUsersInfo(Catalog catalog, UsersType users) throws RuntimeException {
    if (users == null) {
        return;
    }
    Database db = catalog.getClusters().get("cluster").getDatabases().get("database");
    SecureRandom sr = new SecureRandom();
    for (UsersType.User user : users.getUser()) {
        Set<String> roles = extractUserRoles(user);
        String sha1hex = user.getPassword();
        String sha256hex = user.getPassword();
        if (user.isPlaintext()) {
            // Plaintext: compute both digests from the raw password.
            sha1hex = extractPassword(user.getPassword(), ClientAuthScheme.HASH_SHA1);
            sha256hex = extractPassword(user.getPassword(), ClientAuthScheme.HASH_SHA256);
        } else if (user.getPassword().length() == 104) {
            // Masked: first 40 hex chars are SHA-1, the remaining 64 SHA-256.
            int sha1len = ClientAuthScheme.getHexencodedDigestLength(ClientAuthScheme.HASH_SHA1);
            sha1hex = sha1hex.substring(0, sha1len);
            sha256hex = sha256hex.substring(sha1len);
        } else {
            hostLog.warn("User \"" + user.getName() + "\" has invalid masked password in deployment file.");
            throw new RuntimeException("User \"" + user.getName() + "\" has invalid masked password in deployment file");
        }
        org.voltdb.catalog.User catUser = db.getUsers().get(user.getName());
        if (catUser == null) {
            catUser = db.getUsers().add(user.getName());
        }
        // Salt both digests with the same freshly generated bcrypt salt.
        String saltGen = BCrypt.gensalt(BCrypt.GENSALT_DEFAULT_LOG2_ROUNDS, sr);
        String hashedPW = BCrypt.hashpw(sha1hex, saltGen);
        String hashedPW256 = BCrypt.hashpw(sha256hex, saltGen);
        catUser.setShadowpassword(hashedPW);
        catUser.setSha256shadowpassword(hashedPW256);
        // NOTE(review): this field uses a fixed salt constant — presumably so
        // the value is stable across nodes; confirm before changing.
        catUser.setPassword(BCrypt.hashpw(sha256hex, "$2a$10$pWO/a/OQkFyQWQDpchZdEe"));
        // Attach the user to each existing catalog group named by a role.
        for (final String role : roles) {
            final Group catalogGroup = db.getGroups().get(role);
            if (catalogGroup != null) {
                GroupRef groupRef = catUser.getGroups().get(role);
                if (groupRef == null) {
                    groupRef = catUser.getGroups().add(role);
                }
                groupRef.setGroup(catalogGroup);
            } else {
                // Unknown role: warn but keep going so other roles still apply.
                hostLog.warn("User \"" + user.getName() + "\" is assigned to non-existent role \"" + role + "\" " + "and may not have the expected database permissions.");
            }
        }
    }
}
Set user info in the catalog .
31,983
/**
 * Splits a user's comma-separated roles attribute into a sorted set of
 * lower-cased, trimmed role names. Blank entries are dropped.
 *
 * @param user deployment user, may be null
 * @return the role set; empty when the user or attribute is absent
 */
private static Set<String> extractUserRoles(final UsersType.User user) {
    final Set<String> roles = new TreeSet<>();
    if (user == null || user.getRoles() == null) {
        return roles;
    }
    final String roleAttribute = user.getRoles().trim();
    if (roleAttribute.isEmpty()) {
        return roles;
    }
    for (String role : roleAttribute.split(",")) {
        final String normalized = role.trim();
        if (!normalized.isEmpty()) {
            roles.add(normalized.toLowerCase());
        }
    }
    return roles;
}
Takes the list of roles specified in the roles user attributes and returns a set from the comma - separated list
31,984
/**
 * Computes the SHA-1 hash of catalog or deployment bytes. Extracted because
 * the digest-lookup/crash-on-missing-algorithm boilerplate appeared
 * repeatedly.
 *
 * @param inbytes bytes to hash
 * @return the 20-byte SHA-1 digest
 */
public static byte[] makeDeploymentHash(byte[] inbytes) {
    MessageDigest digest = null;
    try {
        digest = MessageDigest.getInstance("SHA-1");
    } catch (NoSuchAlgorithmException e) {
        // Every conforming JVM provides SHA-1; treat its absence as fatal.
        VoltDB.crashLocalVoltDB("Bad JVM has no SHA-1 hash.", true, e);
    }
    digest.update(inbytes);
    final byte[] hash = digest.digest();
    assert (hash.length == 20); // SHA-1 digests are always 160 bits
    return hash;
}
This code appeared repeatedly, so it was extracted into a method: take the bytes of the catalog or deployment file, perform the tedious exception-to-crash handling, feed the bytes in, and return the SHA-1 hash.
31,985
public static Pair < Set < String > , Set < String > > getSnapshotableTableNamesFromInMemoryJar ( InMemoryJarfile jarfile ) { Set < String > fullTableNames = new HashSet < > ( ) ; Set < String > optionalTableNames = new HashSet < > ( ) ; Catalog catalog = new Catalog ( ) ; catalog . execute ( getSerializedCatalogStringFromJar ( jarfile ) ) ; Database db = catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) ; Pair < List < Table > , Set < String > > ret ; ret = getSnapshotableTables ( db , true ) ; ret . getFirst ( ) . forEach ( table -> fullTableNames . add ( table . getTypeName ( ) ) ) ; optionalTableNames . addAll ( ret . getSecond ( ) ) ; ret = getSnapshotableTables ( db , false ) ; ret . getFirst ( ) . forEach ( table -> fullTableNames . add ( table . getTypeName ( ) ) ) ; optionalTableNames . addAll ( ret . getSecond ( ) ) ; return new Pair < Set < String > , Set < String > > ( fullTableNames , optionalTableNames ) ; }
Get all snapshot - able table names from an in - memory catalog jar file . A snapshot - able table is one that s neither an export table nor an implicitly partitioned view .
31,986
public static Pair < List < Table > , Set < String > > getSnapshotableTables ( Database catalog , boolean isReplicated ) { List < Table > tables = new ArrayList < > ( ) ; Set < String > optionalTableNames = new HashSet < > ( ) ; for ( Table table : catalog . getTables ( ) ) { if ( table . getIsreplicated ( ) != isReplicated ) { continue ; } if ( isTableExportOnly ( catalog , table ) ) { continue ; } if ( table . getMaterializer ( ) != null ) { if ( isSnapshotablePersistentTableView ( catalog , table ) ) { optionalTableNames . add ( table . getTypeName ( ) ) ; } else if ( ! isSnapshotableStreamedTableView ( catalog , table ) ) { continue ; } } tables . add ( table ) ; } return new Pair < List < Table > , Set < String > > ( tables , optionalTableNames ) ; }
Get all snapshot - able tables from the catalog . A snapshot - able table is one that s neither an export table nor an implicitly partitioned view .
31,987
public static List < Table > getNormalTables ( Database catalog , boolean isReplicated ) { List < Table > tables = new ArrayList < > ( ) ; for ( Table table : catalog . getTables ( ) ) { if ( ( table . getIsreplicated ( ) == isReplicated ) && table . getMaterializer ( ) == null && ! CatalogUtil . isTableExportOnly ( catalog , table ) ) { tables . add ( table ) ; continue ; } if ( ( table . getMaterializer ( ) != null ) && ! isReplicated && ( CatalogUtil . isTableExportOnly ( catalog , table . getMaterializer ( ) ) ) ) { Column bpc = table . getMaterializer ( ) . getPartitioncolumn ( ) ; if ( bpc != null ) { String bPartName = bpc . getName ( ) ; Column pc = table . getColumns ( ) . get ( bPartName ) ; if ( pc != null ) { tables . add ( table ) ; } } } } return tables ; }
Get all normal tables from the catalog. A normal table is one that is NOT a materialized view nor an export table. For lack of a better name, it is called "normal".
31,988
public static boolean isDurableProc ( String procName ) { SystemProcedureCatalog . Config sysProc = SystemProcedureCatalog . listing . get ( procName ) ; return sysProc == null || sysProc . isDurable ( ) ; }
Return whether the given procedure is durable; if it is a sysproc, the SystemProcedureCatalog is consulted. All non-system procedures are durable.
31,989
public static File createTemporaryEmptyCatalogJarFile ( boolean isXDCR ) throws IOException { File emptyJarFile = File . createTempFile ( "catalog-empty" , ".jar" ) ; emptyJarFile . deleteOnExit ( ) ; VoltCompiler compiler = new VoltCompiler ( isXDCR ) ; if ( ! compiler . compileEmptyCatalog ( emptyJarFile . getAbsolutePath ( ) ) ) { return null ; } return emptyJarFile ; }
Build an empty catalog jar file .
31,990
public static String getSignatureForTable ( String name , SortedMap < Integer , VoltType > schema ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( name ) . append ( SIGNATURE_TABLE_NAME_SEPARATOR ) ; for ( VoltType t : schema . values ( ) ) { sb . append ( t . getSignatureChar ( ) ) ; } return sb . toString ( ) ; }
Get a string signature for the table represented by the args
31,991
public static Pair < Long , String > calculateDrTableSignatureAndCrc ( Database catalog ) { SortedSet < Table > tables = Sets . newTreeSet ( ) ; tables . addAll ( getSnapshotableTables ( catalog , true ) . getFirst ( ) ) ; tables . addAll ( getSnapshotableTables ( catalog , false ) . getFirst ( ) ) ; final PureJavaCrc32 crc = new PureJavaCrc32 ( ) ; final StringBuilder sb = new StringBuilder ( ) ; String delimiter = "" ; for ( Table t : tables ) { if ( t . getIsdred ( ) ) { crc . update ( t . getSignature ( ) . getBytes ( Charsets . UTF_8 ) ) ; sb . append ( delimiter ) . append ( t . getSignature ( ) ) ; delimiter = SIGNATURE_DELIMITER ; } } return Pair . of ( crc . getValue ( ) , sb . toString ( ) ) ; }
Deterministically serializes all DR table signatures into a string and calculates the CRC checksum .
31,992
public static Map < String , String > deserializeCatalogSignature ( String signature ) { Map < String , String > tableSignatures = Maps . newHashMap ( ) ; for ( String oneSig : signature . split ( Pattern . quote ( SIGNATURE_DELIMITER ) ) ) { if ( ! oneSig . isEmpty ( ) ) { final String [ ] parts = oneSig . split ( Pattern . quote ( SIGNATURE_TABLE_NAME_SEPARATOR ) , 2 ) ; tableSignatures . put ( parts [ 0 ] , parts [ 1 ] ) ; } } return tableSignatures ; }
Deserializes a catalog DR table signature string into a map of table signatures .
31,993
public static String getLimitPartitionRowsDeleteStmt ( Table table ) { CatalogMap < Statement > map = table . getTuplelimitdeletestmt ( ) ; if ( map . isEmpty ( ) ) { return null ; } assert ( map . size ( ) == 1 ) ; return map . iterator ( ) . next ( ) . getSqltext ( ) ; }
Given a table return the DELETE statement that can be executed by a LIMIT PARTITION ROWS constraint or NULL if there isn t one .
31,994
public static ExportType addExportConfigToDRConflictsTable ( ExportType export ) { if ( export == null ) { export = new ExportType ( ) ; } boolean userDefineStream = false ; for ( ExportConfigurationType exportConfiguration : export . getConfiguration ( ) ) { if ( exportConfiguration . getTarget ( ) . equals ( DR_CONFLICTS_TABLE_EXPORT_GROUP ) ) { userDefineStream = true ; } } if ( ! userDefineStream ) { ExportConfigurationType defaultConfiguration = new ExportConfigurationType ( ) ; defaultConfiguration . setEnabled ( true ) ; defaultConfiguration . setTarget ( DR_CONFLICTS_TABLE_EXPORT_GROUP ) ; defaultConfiguration . setType ( ServerExportEnum . FILE ) ; PropertyType type = new PropertyType ( ) ; type . setName ( "type" ) ; type . setValue ( DEFAULT_DR_CONFLICTS_EXPORT_TYPE ) ; defaultConfiguration . getProperty ( ) . add ( type ) ; PropertyType nonce = new PropertyType ( ) ; nonce . setName ( "nonce" ) ; nonce . setValue ( DEFAULT_DR_CONFLICTS_NONCE ) ; defaultConfiguration . getProperty ( ) . add ( nonce ) ; PropertyType outdir = new PropertyType ( ) ; outdir . setName ( "outdir" ) ; outdir . setValue ( DEFAULT_DR_CONFLICTS_DIR ) ; defaultConfiguration . getProperty ( ) . add ( outdir ) ; PropertyType ksafe = new PropertyType ( ) ; ksafe . setName ( "replicated" ) ; ksafe . setValue ( "true" ) ; defaultConfiguration . getProperty ( ) . add ( ksafe ) ; PropertyType skipinternal = new PropertyType ( ) ; skipinternal . setName ( "skipinternals" ) ; skipinternal . setValue ( "true" ) ; defaultConfiguration . getProperty ( ) . add ( skipinternal ) ; export . getConfiguration ( ) . add ( defaultConfiguration ) ; } return export ; }
Add default configuration to DR conflicts export target if deployment file doesn t have the configuration
31,995
public synchronized void printResults ( ) throws Exception { ClientStats stats = fullStatsContext . fetch ( ) . getStats ( ) ; String display = "\nA total of %d login requests were received...\n" ; System . out . printf ( display , stats . getInvocationsCompleted ( ) ) ; System . out . printf ( "Average throughput: %,9d txns/sec\n" , stats . getTxnThroughput ( ) ) ; System . out . printf ( "Average latency: %,9.2f ms\n" , stats . getAverageLatency ( ) ) ; System . out . printf ( "10th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .1 ) ) ; System . out . printf ( "25th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .25 ) ) ; System . out . printf ( "50th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .5 ) ) ; System . out . printf ( "75th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .75 ) ) ; System . out . printf ( "90th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .9 ) ) ; System . out . printf ( "95th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .95 ) ) ; System . out . printf ( "99th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .99 ) ) ; System . out . printf ( "99.5th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .995 ) ) ; System . out . printf ( "99.9th percentile latency: %,9.2f ms\n" , stats . kPercentileLatencyAsDouble ( .999 ) ) ; System . out . println ( "\n\n" + stats . latencyHistoReport ( ) ) ; }
Prints the results and statistics of the data load .
31,996
private void doLogin ( LoginGenerator . LoginRecord login ) { try { ClientResponse response = client . callProcedure ( "Login" , login . username , login . password , login . json ) ; long resultCode = response . getResults ( ) [ 0 ] . asScalarLong ( ) ; if ( resultCode == LOGIN_SUCCESSFUL ) acceptedLogins . incrementAndGet ( ) ; else badLogins . incrementAndGet ( ) ; } catch ( Exception e ) { badLogins . incrementAndGet ( ) ; e . printStackTrace ( ) ; } }
Invoke the Login stored procedure to add a login record to the database . If the login is called multiple times for the same username the last accessed time for the login is updated . Thus this sample client can be run repeatedly without having to cycle the database .
31,997
public void loadDatabase ( ) throws Exception { int thread_count = 10 ; Thread [ ] loginThreads = new Thread [ thread_count ] ; for ( int i = 0 ; i < thread_count ; ++ i ) { loginThreads [ i ] = new Thread ( new LoginThread ( ) ) ; loginThreads [ i ] . start ( ) ; } fullStatsContext . fetchAndResetBaseline ( ) ; System . out . println ( "\nLoading database..." ) ; Thread . sleep ( 1000l * 10 ) ; loadComplete . set ( true ) ; client . drain ( ) ; for ( Thread t : loginThreads ) { t . join ( ) ; } printResults ( ) ; createUniqueData ( ) ; }
Load the database with as much data as possible within the specified time range .
31,998
public static void main ( String [ ] args ) throws Exception { JSONClient app = new JSONClient ( ) ; app . initialize ( ) ; app . loadDatabase ( ) ; app . runQueries ( ) ; app . shutdown ( ) ; }
Main routine creates a client instance loads the database then executes example queries against the data .
31,999
public synchronized void reportForeignHostFailed ( int hostId ) { long initiatorSiteId = CoreUtils . getHSIdFromHostAndSite ( hostId , AGREEMENT_SITE_ID ) ; m_agreementSite . reportFault ( initiatorSiteId ) ; if ( ! m_shuttingDown ) { networkLog . warn ( String . format ( "Host %d failed. Cluster remains operational." , hostId ) ) ; } }
Synchronization protects m_knownFailedHosts and ensures that every failed host is only reported once