idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
32,200
/**
 * Resolve a schema name to its HsqlName: null means the session's current
 * schema; otherwise look it up via the schema manager, which throws if the
 * schema does not exist.
 */
HsqlName getSchemaHsqlName(String name) {
    if (name == null) {
        return currentSchema;
    }
    return database.schemaManager.getSchemaHsqlName(name);
}
If name is null, return the HsqlName of the current schema; otherwise return the HsqlName object for the named schema. If the schema does not exist, throw.
32,201
/**
 * Resolve a schema name to its String form: null means the current
 * schema's name; otherwise delegate to the schema manager.
 */
public String getSchemaName(String name) {
    if (name == null) {
        return currentSchema.name;
    }
    return database.schemaManager.getSchemaName(name);
}
Same as above but return string
32,202
/**
 * Create a cached local table with the given name, column names and column
 * types, register it in the session's local-table map, and return it.
 */
public Table defineLocalTable(HsqlName tableName, HsqlName[] colNames, Type[] colTypes) {
    assert (localTables != null);
    final Table table = TableUtil.newTable(database, TableBase.CACHED_TABLE, tableName);
    TableUtil.setColumnsInSchemaTable(table, colNames, colTypes);
    table.createPrimaryKey(new int[0]); // empty primary-key column list
    localTables.put(tableName.name, table);
    return table;
}
Define a local table with the given name column names and column types .
32,203
/**
 * Replace the column types of an existing local table.
 * (The original author flagged this as dubious: it mutates a table that may
 * already have been handed out to callers.)
 */
public void updateLocalTable(HsqlName queryName, Type[] finalTypes) {
    assert (localTables != null);
    final Table table = getLocalTable(queryName.name);
    assert (table != null);
    TableUtil.updateColumnTypes(table, finalTypes);
}
Update the local table with new types . This is very dubious .
32,204
/**
 * Write the current value of every sequence touched by this session to the
 * log, then clear the session's sequence-update set.
 */
void logSequences() {
    final OrderedHashSet updated = sessionData.sequenceUpdateSet;
    if (updated == null || updated.isEmpty()) {
        return;
    }
    final int size = updated.size();
    for (int i = 0; i < size; i++) {
        database.logger.writeSequenceStatement(this, (NumberSequence) updated.get(i));
    }
    sessionData.sequenceUpdateSet.clear();
}
SEQUENCE current values
32,205
/**
 * Write the given DDL text to a temporary ".sql" file and register that
 * file as a schema via {@link #addSchema}. Test-support helper.
 *
 * @param ddlText schema DDL to persist
 * @throws IOException if the temp file cannot be created or written
 */
public void addLiteralSchema(String ddlText) throws IOException {
    File temp = File.createTempFile("literalschema", "sql");
    temp.deleteOnExit();
    // Fix: the original leaked the FileWriter if write() threw;
    // try-with-resources guarantees it is closed.
    try (FileWriter out = new FileWriter(temp)) {
        out.write(ddlText);
    }
    addSchema(URLEncoder.encode(temp.getAbsolutePath(), "UTF-8"));
}
This is test code written by Ryan even though it was committed by John .
32,206
/**
 * Register a schema file path (UTF-8 URL-encoded) with this project.
 * The path is decoded before being stored; duplicates and directories are
 * rejected by assertion.
 *
 * @param schemaURL UTF-8 URL-encoded path of a schema file
 */
public void addSchema(String schemaURL) {
    try {
        schemaURL = URLDecoder.decode(schemaURL, "UTF-8");
    } catch (final UnsupportedEncodingException e) {
        // UTF-8 support is mandated by the JVM spec, so this is effectively
        // unreachable; the original treated it as fatal.
        e.printStackTrace();
        System.exit(-1);
    }
    assert (m_schemas.contains(schemaURL) == false);
    final File schemaFile = new File(schemaURL);
    // Fix: removed the dead "schemaFile != null" assertion — 'new' can
    // never yield null, so it asserted nothing.
    assert (schemaFile.isDirectory() == false);
    m_schemas.add(schemaURL);
}
Add a schema based on a URL .
32,207
/**
 * Helper: does the given VoltXMLElement (sub)tree contain a parameter?
 * "value" nodes answer via their "isparam" attribute; vectors/rows check
 * their children; column refs, functions and table subqueries never do;
 * operation nodes check their operand(s).
 */
private static boolean isParameterized(VoltXMLElement elm) {
    final String name = elm.name;
    if (name.equals("value")) {
        return elm.getBoolAttribute("isparam", false);
    } else if (name.equals("vector") || name.equals("row")) {
        return elm.children.stream().anyMatch(ExpressionUtil::isParameterized);
    } else if (name.equals("columnref") || name.equals("function")
            || name.equals("tablesubquery")) {
        return false;
    } else {
        assert name.equals("operation") : "unknown VoltXMLElement type: " + name;
        final ExpressionType op = mapOfVoltXMLOpType.get(elm.attributes.get("optype"));
        assert op != null;
        switch (op) {
            // Binary operators: parameterized if either operand is.
            case CONJUNCTION_OR: case CONJUNCTION_AND:
            case COMPARE_GREATERTHAN: case COMPARE_LESSTHAN:
            case COMPARE_EQUAL: case COMPARE_NOTEQUAL:
            case COMPARE_GREATERTHANOREQUALTO: case COMPARE_LESSTHANOREQUALTO:
            case OPERATOR_PLUS: case OPERATOR_MINUS:
            case OPERATOR_MULTIPLY: case OPERATOR_DIVIDE:
            case OPERATOR_CONCAT: case OPERATOR_MOD:
            case COMPARE_IN:
                return isParameterized(elm.children.get(0))
                        || isParameterized(elm.children.get(1));
            // Unary operators: check the single operand.
            case OPERATOR_IS_NULL: case OPERATOR_EXISTS:
            case OPERATOR_NOT: case OPERATOR_UNARY_MINUS:
                return isParameterized(elm.children.get(0));
            default:
                assert false;
                return false;
        }
    }
}
Helper to check if a VoltXMLElement contains parameter .
32,208
/**
 * Best-effort type of a VoltXMLElement: use its "valuetype" attribute if
 * set; for a column reference, look the column's type up in the catalog;
 * otherwise return "".
 */
private static String getType(Database db, VoltXMLElement elm) {
    final String declared = elm.getStringAttribute("valuetype", "");
    if (!declared.isEmpty()) {
        return declared;
    }
    if (!elm.name.equals("columnref")) {
        return "";
    }
    final String tblName = elm.getStringAttribute("table", "");
    final int colIndex = elm.getIntAttribute("index", 0);
    // Find the referenced table, then the column at the given index, and
    // translate its catalog type code into a VoltType name.
    return StreamSupport.stream(db.getTables().spliterator(), false)
            .filter(tbl -> tbl.getTypeName().equals(tblName))
            .findAny()
            .flatMap(tbl -> StreamSupport.stream(tbl.getColumns().spliterator(), false)
                    .filter(col -> col.getIndex() == colIndex)
                    .findAny())
            .map(Column::getType)
            .map(typ -> VoltType.get((byte) ((int) typ)).getName())
            .orElse("");
}
Get the underlying type of the VoltXMLElement node . Need reference to the catalog for PVE
32,209
/**
 * Guess, from an operation node, the type its parameterized child should
 * have: comparisons/arithmetic take the type of the non-parameter operand;
 * logical operators imply boolean; unary minus implies integer. Returns ""
 * when nothing can be guessed.
 */
private static String guessParameterType(Database db, VoltXMLElement elm) {
    if (!isParameterized(elm) || !elm.name.equals("operation")) {
        return "";
    }
    final ExpressionType op = mapOfVoltXMLOpType.get(elm.attributes.get("optype"));
    assert op != null;
    switch (op) {
        case CONJUNCTION_OR:
        case CONJUNCTION_AND:
        case OPERATOR_NOT:
            return "boolean";
        case COMPARE_GREATERTHAN:
        case COMPARE_LESSTHAN:
        case COMPARE_EQUAL:
        case COMPARE_NOTEQUAL:
        case COMPARE_GREATERTHANOREQUALTO:
        case COMPARE_LESSTHANOREQUALTO:
        case OPERATOR_PLUS:
        case OPERATOR_MINUS:
        case OPERATOR_MULTIPLY:
        case OPERATOR_DIVIDE:
        case OPERATOR_CONCAT:
        case OPERATOR_MOD:
        case COMPARE_IN: {
            // Infer from whichever operand is NOT the parameter.
            final VoltXMLElement left = elm.children.get(0);
            final VoltXMLElement right = elm.children.get(1);
            return isParameterized(left) ? getType(db, right) : getType(db, left);
        }
        case OPERATOR_UNARY_MINUS:
            return "integer";
        case OPERATOR_IS_NULL:
        case OPERATOR_EXISTS:
            return "";
        default:
            assert false;
            return "";
    }
}
Guess, from a parent operation node, what the parameter type of a child node should be when one of its child nodes contains a parameter.
32,210
/**
 * True if the expression itself, either of its direct children, or any of
 * its argument expressions satisfies the predicate.
 *
 * NOTE(review): despite the description "any node of the tree", this does
 * NOT recurse below the first level — grandchildren are only examined if
 * the predicate itself recurses. Confirm against callers before deepening.
 *
 * @param expr expression to inspect; may be null
 * @param pred predicate applied to expr, its children and its args
 */
public static boolean reduce(AbstractExpression expr, Predicate<AbstractExpression> pred) {
    if (pred.test(expr)) {
        return true;
    }
    if (expr == null) {
        // Fix: the original re-invoked pred.test(null) here even though the
        // call above had just returned false for the same argument; for a
        // pure predicate the answer cannot differ.
        return false;
    }
    return pred.test(expr.getLeft())
            || pred.test(expr.getRight())
            || (expr.getArgs() != null && expr.getArgs().stream().anyMatch(pred));
}
Check if any node of given expression tree satisfies given predicate
32,211
/**
 * Flatten an arbitrarily nested CONJUNCTION_AND tree into a collection of
 * its leaf predicates. The current order is an incidental left-to-right
 * breadth-first traversal; callers must not rely on it.
 */
public static Collection<AbstractExpression> uncombineAny(AbstractExpression expr) {
    final ArrayDeque<AbstractExpression> leaves = new ArrayDeque<AbstractExpression>();
    if (expr == null) {
        return leaves;
    }
    final ArrayDeque<AbstractExpression> pending = new ArrayDeque<AbstractExpression>();
    pending.add(expr);
    for (AbstractExpression cur; (cur = pending.poll()) != null; ) {
        if (cur.getExpressionType() == ExpressionType.CONJUNCTION_AND) {
            // Conjunction: keep descending into both sides.
            pending.add(cur.getLeft());
            pending.add(cur.getRight());
        } else {
            leaves.add(cur);
        }
    }
    return leaves;
}
Convert one or more predicates, potentially in an arbitrarily nested conjunction tree, into a flattened collection. Similar to uncombine, but for arbitrary tree shapes and with no guarantee of the result collection type or of any ordering within the collection. In fact it currently fills an ArrayDeque via a left-to-right breadth-first traversal, but for no particular reason, so that is all subject to change.
32,212
/**
 * Recursively walk an expression and collect every TupleValueExpression it
 * contains. A TVE node itself is returned without descending further.
 */
public static List<TupleValueExpression> getTupleValueExpressions(AbstractExpression input) {
    final ArrayList<TupleValueExpression> result = new ArrayList<TupleValueExpression>();
    if (input == null) {
        return result;
    }
    if (input instanceof TupleValueExpression) {
        result.add((TupleValueExpression) input);
        return result;
    }
    result.addAll(getTupleValueExpressions(input.m_left));
    result.addAll(getTupleValueExpressions(input.m_right));
    if (input.m_args != null) {
        for (AbstractExpression arg : input.m_args) {
            result.addAll(getTupleValueExpressions(arg));
        }
    }
    return result;
}
Recursively walk an expression and return a list of all the tuple value expressions it contains .
32,213
/**
 * True when a ScalarValueExpression must be inserted between a subquery and
 * its parent expression. EXISTS, comparison parents, and an already-present
 * ScalarValueExpression consume the subquery directly; a null parent (the
 * subquery is the root) requires the wrapper.
 */
private static boolean subqueryRequiresScalarValueExpressionFromContext(
        AbstractExpression parentExpr) {
    if (parentExpr == null) {
        return true;
    }
    return parentExpr.getExpressionType() != ExpressionType.OPERATOR_EXISTS
            && !(parentExpr instanceof ComparisonExpression)
            && !(parentExpr instanceof ScalarValueExpression);
}
Return true if we must insert a ScalarValueExpression between a subquery and its parent expression .
32,214
/**
 * Wrap a single-column scalar subquery in a ScalarValueExpression and
 * propagate the subquery's value type/size to the wrapper.
 *
 * @throws PlanningErrorException if the subquery yields more than one column
 */
private static AbstractExpression addScalarValueExpression(SelectSubqueryExpression expr) {
    if (expr.getSubqueryScan().getOutputSchema().size() != 1) {
        throw new PlanningErrorException("Scalar subquery can have only one output column");
    }
    expr.changeToScalarExprType();
    final AbstractExpression wrapper = new ScalarValueExpression();
    wrapper.setLeft(expr);
    wrapper.setValueType(expr.getValueType());
    wrapper.setValueSize(expr.getValueSize());
    return wrapper;
}
Add a ScalarValueExpression on top of the SubqueryExpression
32,215
/**
 * Run the invocation through every permission policy. An ALLOW from any
 * policy wins immediately; otherwise the first denying policy's error
 * response is returned. Returns null when the call is accepted, or when
 * auth is disabled for the user.
 */
public ClientResponseImpl shouldAccept(String name, AuthSystem.AuthUser user,
        final StoredProcedureInvocation task, final Procedure catProc) {
    if (!user.isAuthEnabled()) {
        return null;
    }
    InvocationPermissionPolicy firstDenier = null;
    InvocationPermissionPolicy.PolicyResult result =
            InvocationPermissionPolicy.PolicyResult.DENY;
    for (InvocationPermissionPolicy policy : m_permissionpolicies) {
        result = policy.shouldAccept(user, task, catProc);
        if (result == InvocationPermissionPolicy.PolicyResult.ALLOW) {
            firstDenier = null; // an explicit ALLOW overrides earlier DENYs
            break;
        }
        if (result == InvocationPermissionPolicy.PolicyResult.DENY && firstDenier == null) {
            firstDenier = policy; // remember the first denier for its error text
        }
    }
    if (firstDenier != null) {
        return firstDenier.getErrorResponse(user, task, catProc);
    }
    assert (result == InvocationPermissionPolicy.PolicyResult.ALLOW);
    return null;
}
For auth disabled user the first policy will return ALLOW breaking the loop .
32,216
/**
 * System procedure: apply a new log4j configuration cluster-wide.
 * The lowest site on each host logs who changed the settings, all sites
 * rendezvous on a barrier, the config is persisted and applied, and if
 * logging just became more verbose the change is re-logged so that it is
 * visible at the new level.
 */
@SuppressWarnings("deprecation")
public VoltTable[] run(SystemProcedureExecutionContext ctx, String username,
        String remoteHost, String xmlConfig) {
    long oldLevels = 0;
    if (ctx.isLowestSiteId()) {
        hostLog.info(String.format("%s from %s changed the log4j settings", username, remoteHost));
        hostLog.info(xmlConfig);
        oldLevels = hostLog.getLogLevels(loggers);
    }
    try {
        barrier.await();
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of silently swallowing it.
        Thread.currentThread().interrupt();
    } catch (BrokenBarrierException ignored) {
        // Best effort: proceed even if another site broke the barrier.
    }
    VoltDB.instance().logUpdate(xmlConfig,
            DeprecatedProcedureAPIAccess.getVoltPrivateRealTransactionId(this),
            ctx.getPaths().getVoltDBRoot());
    ctx.updateBackendLogLevels();
    if (ctx.isLowestSiteId()) {
        final long newLevels = hostLog.getLogLevels(loggers);
        if (newLevels != oldLevels) {
            // Bits 3..5 appear to encode this logger's level — TODO confirm
            // the packing against getLogLevels().
            Level before = Level.values()[(int) ((oldLevels >> 3) & 7)];
            Level after = Level.values()[(int) ((newLevels >> 3) & 7)];
            if (before.ordinal() > Level.INFO.ordinal()
                    && after.ordinal() <= Level.INFO.ordinal()) {
                // INFO just became visible: log the change again so it shows up.
                hostLog.info(String.format("%s from %s changed the log4j settings",
                        username, remoteHost));
                hostLog.info(xmlConfig);
            }
        }
        barrier.reset();
    }
    VoltTable result = new VoltTable(VoltSystemProcedure.STATUS_SCHEMA);
    result.addRow(VoltSystemProcedure.STATUS_OK);
    return (new VoltTable[] { result });
}
Change the operational log configuration .
32,217
/**
 * Add a choice entry, truncating the displayed item to at most
 * MaxLenInZChoice characters; the full value is stored separately.
 */
public void add(String item, String value) {
    final int shown = Math.min(item.length(), MaxLenInZChoice);
    super.add(item.substring(0, shown));
    values.addElement(value);
}
restrict strings for the choice to MaxLenInZChoice characters
32,218
/**
 * Find the index of the given value in the values list.
 *
 * @return the index of the first match, or -1 if not present
 */
private int findValue(String s) {
    for (int i = 0, n = values.size(); i < n; i++) {
        if (s.equals(values.elementAt(i))) {
            return i;
        }
    }
    return -1;
}
Find the index of the given value in the values list; returns -1 if the value is not present.
32,219
/**
 * Assemble a chunk by prepending the table schema bytes to the remaining
 * payload of buf, so the result can be used to build the VoltTable passed
 * to the EE.
 */
public static ByteBuffer getNextChunk(byte[] schemaBytes, ByteBuffer buf,
        CachedByteBufferAllocator resultBufferAllocator) {
    // Skip a 4-byte prefix — presumably a length/row-count field; confirm
    // against the serializer on the sending side.
    buf.position(buf.position() + 4);
    final int total = schemaBytes.length + buf.remaining();
    final ByteBuffer assembled = resultBufferAllocator.allocate(total);
    assembled.put(schemaBytes);
    assembled.put(buf);
    assembled.flip();
    return assembled;
}
Assemble the chunk so that it can be used to construct the VoltTable that will be passed to EE .
32,220
/**
 * Translate one decoded snapshot-stream message into restore work (or null
 * for control messages), always discarding the container and acking the
 * block afterwards.
 */
private RestoreWork processMessage(DecodedContainer msg,
        CachedByteBufferAllocator resultBufferAllocator) {
    if (msg == null) {
        return null;
    }
    RestoreWork restoreWork = null;
    try {
        if (msg.m_msgType == StreamSnapshotMessageType.FAILURE) {
            VoltDB.crashLocalVoltDB("Rejoin source sent failure message.", false, null);
            if (m_expectedEOFs.decrementAndGet() == 0) {
                m_EOF = true;
            }
        } else if (msg.m_msgType == StreamSnapshotMessageType.END) {
            if (rejoinLog.isTraceEnabled()) {
                rejoinLog.trace("Got END message " + msg.m_blockIndex + " from "
                        + CoreUtils.hsIdToString(msg.m_srcHSId)
                        + " (TargetId " + msg.m_dataTargetId + ")");
            }
            if (m_expectedEOFs.decrementAndGet() == 0) {
                m_EOF = true;
            }
        } else if (msg.m_msgType == StreamSnapshotMessageType.SCHEMA) {
            rejoinLog.trace("Got SCHEMA message " + msg.m_blockIndex + " from "
                    + CoreUtils.hsIdToString(msg.m_srcHSId)
                    + " (TargetId " + msg.m_dataTargetId + ")");
            // Cache the table schema for later DATA blocks of this table.
            ByteBuffer block = msg.m_container.b();
            block.position(StreamSnapshotDataTarget.contentOffset);
            byte[] schemaBytes = new byte[block.remaining()];
            block.get(schemaBytes);
            m_schemas.put(msg.m_tableId, schemaBytes);
        } else if (msg.m_msgType == StreamSnapshotMessageType.HASHINATOR) {
            ByteBuffer block = msg.m_container.b();
            block.position(StreamSnapshotDataTarget.contentOffset);
            long version = block.getLong();
            byte[] hashinatorConfig = new byte[block.remaining()];
            block.get(hashinatorConfig);
            restoreWork = new HashinatorRestoreWork(version, hashinatorConfig);
        } else {
            // DATA block: prepend the cached schema and hand off for restore.
            rejoinLog.trace("Got DATA message " + msg.m_blockIndex + " from "
                    + CoreUtils.hsIdToString(msg.m_srcHSId)
                    + " (TargetId " + msg.m_dataTargetId + ")");
            ByteBuffer block = msg.m_container.b();
            if (!m_schemas.containsKey(msg.m_tableId)) {
                VoltDB.crashLocalVoltDB("No schema for table with ID " + msg.m_tableId,
                        false, null);
            }
            block.position(StreamSnapshotDataTarget.contentOffset);
            ByteBuffer nextChunk = getNextChunk(m_schemas.get(msg.m_tableId), block,
                    resultBufferAllocator);
            m_bytesReceived += nextChunk.remaining();
            restoreWork = new TableRestoreWork(msg.m_tableId, nextChunk);
        }
        return restoreWork;
    } finally {
        // Always release the container and ack, even on the crash paths.
        msg.m_container.discard();
        m_ack.ack(msg.m_srcHSId, msg.m_msgType == StreamSnapshotMessageType.END,
                msg.m_dataTargetId, msg.m_blockIndex);
    }
}
Process a message pulled off from the network thread and discard the container once it s processed .
32,221
/**
 * Copy a file from one path to another, overwriting the destination if it
 * exists. Uses java.nio.file.Files (standard library) instead of the
 * patched Guava Files helper; REPLACE_EXISTING matches Guava's
 * overwrite-on-copy semantics.
 *
 * @param fromPath source file path
 * @param toPath   destination file path
 * @throws Exception on any I/O failure (interface preserved)
 */
public static void copyFile(String fromPath, String toPath) throws Exception {
    File inputFile = new File(fromPath);
    File outputFile = new File(toPath);
    java.nio.file.Files.copy(inputFile.toPath(), outputFile.toPath(),
            java.nio.file.StandardCopyOption.REPLACE_EXISTING);
}
Simple code to copy a file from one place to another ... Java should have this built in ... stupid java ...
32,222
/**
 * Extract the revision/build identifier from a full build string.
 * Accepts either a "key=value" form (value after the first '=') or an
 * embedded "-&lt;digits&gt;-&lt;8 word chars&gt;[-suffix]" pattern; returns null when
 * neither yields a non-empty value.
 */
public static String parseRevisionString(String fullBuildString) {
    // "key=value" form takes precedence.
    String[] parts = fullBuildString.split("=", 2);
    if (parts.length == 2) {
        String build = parts[1].trim();
        return build.length() == 0 ? null : build;
    }
    // Otherwise look for "-<digits>-<8 word chars>[-anything]".
    Matcher m = Pattern.compile("-(\\d*-\\w{8}(?:-.*)?)").matcher(fullBuildString);
    if (!m.find()) {
        return null;
    }
    String build = m.group(1).trim();
    return build.length() == 0 ? null : build;
}
Check that RevisionStrings are properly formatted .
32,223
/**
 * Parse a dotted version string ("x.y.z", any number of parts) into an
 * array where each part is an Integer when numeric, otherwise the raw
 * String. Returns null for null input, for a rejected whitespace input
 * (NOTE(review): matches("\\s") only rejects a one-character all-whitespace
 * string — confirm the intended guard), for an empty split, or when the
 * first part is not numeric.
 */
public static Object[] parseVersionString(String versionString) {
    if (versionString == null || versionString.matches("\\s")) {
        return null;
    }
    final String[] parts = versionString.split("\\.");
    if (parts.length == 0) {
        return null;
    }
    final Object[] parsed = new Object[parts.length];
    for (int i = 0; i < parts.length; i++) {
        try {
            parsed[i] = Integer.parseInt(parts[i]);
        } catch (NumberFormatException e) {
            parsed[i] = parts[i]; // non-numeric part kept as a String
        }
    }
    // Reject versions whose major component is not a number.
    return (parsed[0] instanceof Integer) ? parsed : null;
}
Parse a version string in the form of x . y . z . It doesn t require that there are exactly three parts in the version . Each part must be separated by a dot .
32,224
/**
 * Lexicographic comparison of parsed version arrays (as produced by
 * parseVersionString). Integer parts compare numerically; an Integer part
 * sorts after a String part; String parts compare lexically. With an equal
 * common prefix, the longer version is the larger.
 *
 * @throws IllegalArgumentException if either argument is null
 */
public static int compareVersions(Object[] left, Object[] right) {
    if (left == null || right == null) {
        throw new IllegalArgumentException("Invalid versions");
    }
    for (int i = 0; i < left.length; i++) {
        if (right.length == i) {
            return 1; // left has extra trailing parts
        }
        final Object l = left[i];
        final Object r = right[i];
        if (l instanceof Integer) {
            if (r instanceof Integer) {
                final int cmp = Integer.compare((Integer) l, (Integer) r);
                if (cmp != 0) {
                    return cmp;
                }
            } else {
                return 1; // a number sorts after a string part
            }
        } else if (r instanceof Integer) {
            return -1;
        } else {
            final int cmp = ((String) l).compareTo((String) r);
            if (cmp != 0) {
                return cmp;
            }
        }
    }
    return (left.length < right.length) ? -1 : 0;
}
Compare two versions. Each version is represented as an array of parsed parts (Integers and/or Strings), as produced by parseVersionString.
32,225
/**
 * Lazily determine whether pro (enterprise) code is on the classpath.
 * The answer is cached in m_isPro; setting -Dcommunity=true forces false
 * without probing the classpath.
 */
public static boolean isPro() {
    if (m_isPro == null) {
        final boolean community =
                Boolean.parseBoolean(System.getProperty("community", "false"));
        // Short-circuit skips the classpath probe in community mode.
        m_isPro = !community
                && ProClass.load("org.voltdb.CommandLogImpl", "Command logging",
                        ProClass.HANDLER_IGNORE).hasProClass();
    }
    return m_isPro.booleanValue();
}
check if we re running pro code
32,226
/**
 * Cheap additive checksum over bytes [0, position) of the buffer.
 * The buffer's position is saved and restored; byte order does not matter
 * because addition is commutative ("I heart commutativity").
 */
public static final long cheesyBufferCheckSum(ByteBuffer buffer) {
    final int savedPos = buffer.position();
    buffer.position(0);
    long sum = 0;
    if (buffer.hasArray()) {
        // Fast path: walk the backing array directly.
        final byte[] raw = buffer.array();
        final int off = buffer.arrayOffset();
        for (int i = off; i < off + savedPos; i++) {
            sum += raw[i];
        }
    } else {
        // Direct buffer: fall back to relative gets from position 0.
        for (int i = 0; i < savedPos; i++) {
            sum += buffer.get();
        }
    }
    buffer.position(savedPos);
    return sum;
}
I heart commutativity
32,227
/**
 * Concatenate a sequence of arrays into a single array whose component
 * type is taken from the supplied empty prototype array. Returns the
 * prototype itself when the sequence is empty.
 */
public static <T> T[] concatAll(final T[] empty, Iterable<T[]> arrayList) {
    assert (empty.length == 0);
    if (!arrayList.iterator().hasNext()) {
        return empty; // nothing to concatenate
    }
    int total = 0;
    for (T[] piece : arrayList) {
        total += piece.length;
    }
    final T[] joined = Arrays.copyOf(empty, total);
    int cursor = 0;
    for (T[] piece : arrayList) {
        System.arraycopy(piece, 0, joined, cursor, piece.length);
        cursor += piece.length;
    }
    return joined;
}
Concatenate an list of arrays of typed - objects
32,228
/**
 * Max per-server resident set size, in MB, across the cluster the client
 * is connected to (read from the @Statistics MEMORY selector). Exits the
 * JVM on failure — test-support semantics preserved from the original.
 */
public static long getMBRss(Client client) {
    assert (client != null);
    try {
        ClientResponse r = client.callProcedure("@Statistics", "MEMORY", 0);
        VoltTable stats = r.getResults()[0];
        stats.resetRowPosition();
        long rssMax = 0;
        while (stats.advanceRow()) {
            final long rssMb = stats.getLong("RSS") / 1024;
            rssMax = Math.max(rssMax, rssMb);
        }
        return rssMax;
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
        return 0;
    }
}
Get the resident set size in mb for the voltdb server on the other end of the client . If the client is connected to multiple servers return the max individual rss across the cluster .
32,229
/**
 * Zip two lists into a multimap pairing keys[i] with values[i]. Any surplus
 * values (when values outnumber keys) are all attached to the first key.
 * Returns null when either list is empty.
 */
public static <K, V> Multimap<K, V> zipToMap(List<K> keys, List<V> values) {
    if (keys.isEmpty() || values.isEmpty()) {
        return null;
    }
    final ArrayListMultimap<K, V> zipped = ArrayListMultimap.create();
    final Iterator<K> keyIt = keys.iterator();
    final Iterator<V> valueIt = values.iterator();
    while (keyIt.hasNext() && valueIt.hasNext()) {
        zipped.put(keyIt.next(), valueIt.next());
    }
    // Leftover values pile onto the first key.
    final K overflowKey = keys.get(0);
    while (valueIt.hasNext()) {
        zipped.put(overflowKey, valueIt.next());
    }
    return zipped;
}
Zip the two lists up into a multimap
32,230
/**
 * Round-robin drain: repeatedly take one element from the head of each
 * deque and append it to a single result list; a deque that runs out is
 * removed from the collection. Modifies both the deques and the collection
 * in place.
 *
 * @param stuff collection of deques to drain (mutated)
 * @return all elements, interleaved one-per-deque per round
 */
public static <K> List<K> zip(Collection<Deque<K>> stuff) {
    // Fix: use the stdlib ArrayList directly instead of Guava's
    // Lists.newArrayList() — no behavior change, one less dependency.
    final List<K> result = new java.util.ArrayList<>();
    Iterator<Deque<K>> iter = stuff.iterator();
    while (iter.hasNext()) {
        final K next = iter.next().poll();
        if (next != null) {
            result.add(next);
        } else {
            iter.remove(); // deque exhausted; drop it from the rotation
        }
        if (!iter.hasNext()) {
            iter = stuff.iterator(); // wrap around for the next round
        }
    }
    return result;
}
Aggregates the elements from each of the given deque . It takes one element from the head of each deque in each loop and put them into a single list . This method modifies the deques in - place .
32,231
/**
 * An ArrayList-valued ListMultimap whose backing key map is a TreeMap, so
 * keys iterate in natural order.
 */
public static <K extends Comparable<?>, V> ListMultimap<K, V> sortedArrayListMultimap() {
    final Map<K, Collection<V>> backing = Maps.newTreeMap();
    // Lambda replaces the anonymous Supplier — same factory behavior.
    return Multimaps.newListMultimap(backing, () -> Lists.newArrayList());
}
Create an ArrayListMultimap that uses TreeMap as the container map so order is preserved .
32,232
/**
 * Serialize-then-deserialize an invocation so its serializedParams field is
 * populated, as needed for command logging when the invocation is routed to
 * a local site. Invocations that already carry serialized params pass
 * through untouched.
 */
public static StoredProcedureInvocation roundTripForCL(StoredProcedureInvocation invocation)
        throws IOException {
    if (invocation.getSerializedParams() != null) {
        return invocation;
    }
    final ByteBuffer buf = ByteBuffer.allocate(invocation.getSerializedSize());
    invocation.flattenToBuffer(buf);
    buf.flip();
    final StoredProcedureInvocation roundTripped = new StoredProcedureInvocation();
    roundTripped.initFromBuffer(buf);
    return roundTripped;
}
Serialize and then deserialize an invocation so that it has serializedParams set for command logging if the invocation is sent to a local site .
32,233
/**
 * Map partition id -> VARBINARY partition key for the given hashinator
 * (or the current one when hashinator is null). Returns null when no
 * partition keys are available.
 */
public static Map<Integer, byte[]> getBinaryPartitionKeys(TheHashinator hashinator) {
    final VoltTable partitionKeys = (hashinator == null)
            ? TheHashinator.getPartitionKeys(VoltType.VARBINARY)
            : TheHashinator.getPartitionKeys(hashinator, VoltType.VARBINARY);
    if (partitionKeys == null) {
        return null;
    }
    // Round-trip through a buffer so iteration happens on a private copy.
    final ByteBuffer buf = ByteBuffer.allocate(partitionKeys.getSerializedSize());
    partitionKeys.flattenToBuffer(buf);
    buf.flip();
    final VoltTable keyCopy = PrivateVoltTableFactory.createVoltTableFromSharedBuffer(buf);
    final Map<Integer, byte[]> partitionMap = new HashMap<>();
    while (keyCopy.advanceRow()) {
        partitionMap.put((int) keyCopy.getLong(0), keyCopy.getVarbinary(1));
    }
    return partitionMap;
}
Get VARBINARY partition keys for the specified topology .
32,234
/**
 * Load connection credentials (e.g. username and password) from a
 * properties file.
 *
 * @param credentials path to a readable properties file
 * @return the loaded properties
 * @throws IllegalArgumentException if the file is missing, unreadable, or
 *         fails to load
 */
public static Properties readPropertiesFromCredentials(String credentials) {
    Properties props = new Properties();
    File propFD = new File(credentials);
    if (!propFD.exists() || !propFD.isFile() || !propFD.canRead()) {
        throw new IllegalArgumentException(
                "Credentials file " + credentials + " is not a read accessible file");
    }
    // Fix: the original never closed the FileReader (resource leak);
    // try-with-resources closes it on every path.
    try (FileReader fr = new FileReader(credentials)) {
        props.load(fr);
    } catch (IOException e) {
        // Fix: preserve the underlying cause instead of discarding it.
        throw new IllegalArgumentException(
                "Credential file not found or permission denied.", e);
    }
    return props;
}
Get username and password from credentials file .
32,235
/**
 * Serialize the deferred-serialization payload into mbuf, always cancelling
 * the deferred serialization afterwards, and return the number of bytes
 * written.
 */
public static int writeDeferredSerialization(ByteBuffer mbuf, DeferredSerialization ds)
        throws IOException {
    try {
        final int start = mbuf.position();
        ds.serialize(mbuf);
        return mbuf.position() - start;
    } finally {
        ds.cancel(); // release resources even if serialize() throws
    }
}
Serialize the deferred serializer data into byte buffer
32,236
/**
 * Return the node at the given ordinal index in this row's linked node
 * list, counting from the primary node.
 */
public NodeAVL getNode(int index) {
    NodeAVL node = nPrimaryNode;
    for (int i = 0; i < index; i++) {
        node = node.nNext;
    }
    return node;
}
Returns the Node for a given Index using the ordinal position of the Index within the Table Object .
32,237
/**
 * Return the node for the next index on this row: the primary node when n
 * is null, otherwise n's successor in the node list.
 */
NodeAVL getNextNode(NodeAVL n) {
    return (n == null) ? nPrimaryNode : n.nNext;
}
Returns the Node for the next Index on this database row given the Node for any Index .
32,238
/**
 * Element-wise, order-sensitive equality of two ACL lists: true when the
 * lists have the same size and equal elements at each position.
 */
private boolean listACLEquals(List<ACL> lista, List<ACL> listb) {
    final int n = lista.size();
    if (n != listb.size()) {
        return false;
    }
    for (int i = 0; i < n; i++) {
        if (!lista.get(i).equals(listb.get(i))) {
            return false;
        }
    }
    return true;
}
Compare two lists of ACLs. Return true if they are the same size and their elements are equal in the same order; otherwise return false.
32,239
/**
 * Map an ACL list to its Long key, assigning and caching a fresh key the
 * first time a list is seen. A null list maps to -1.
 */
public synchronized Long convertAcls(List<ACL> acls) {
    if (acls == null) {
        return -1L;
    }
    final Long cached = aclKeyMap.get(acls);
    if (cached != null) {
        return cached;
    }
    final long fresh = incrementIndex();
    longKeyMap.put(fresh, acls);
    aclKeyMap.put(acls, fresh);
    return fresh;
}
Converts a list of ACLs to a single Long key, assigning and caching a new key if the list has not been seen before.
32,240
/**
 * Map a Long key back to its ACL list. A key of -1 means the open/unsafe
 * ACL; an unknown key indicates internal inconsistency and is fatal.
 */
public synchronized List<ACL> convertLong(Long longVal) {
    if (longVal == null) {
        return null;
    }
    if (longVal == -1L) {
        return Ids.OPEN_ACL_UNSAFE;
    }
    final List<ACL> acls = longKeyMap.get(longVal);
    if (acls == null) {
        LOG.error("ERROR: ACL not available for long " + longVal);
        throw new RuntimeException("Failed to fetch acls for " + longVal);
    }
    return acls;
}
Converts a Long key back to the list of ACLs it represents.
32,241
/**
 * Approximate tree size: sum of path lengths plus data lengths over all
 * nodes. Each node is locked briefly while its data length is read.
 */
public long approximateDataSize() {
    long total = 0;
    for (Map.Entry<String, DataNode> entry : nodes.entrySet()) {
        final DataNode dn = entry.getValue();
        synchronized (dn) {
            total += entry.getKey().length();
            total += (dn.data == null ? 0 : dn.data.length);
        }
    }
    return total;
}
Get the size of the nodes based on path and data length .
32,242
/**
 * True when the path is one of the reserved paths owned by ZooKeeper:
 * the root, the proc subtree root, or the quota subtree root.
 */
boolean isSpecialPath(String path) {
    return rootZookeeper.equals(path)
            || procZookeeper.equals(path)
            || quotaZookeeper.equals(path);
}
is the path one of the special paths owned by zookeeper .
32,243
/**
 * Apply a count delta to the stat node for the given quota prefix, then
 * warn if the updated count exceeds the configured quota limit.
 */
public void updateCount(String lastPrefix, int diff) {
    final String statNode = Quotas.statPath(lastPrefix);
    DataNode node = nodes.get(statNode);
    if (node == null) {
        // The stat node should exist for every quota'd prefix.
        LOG.error("Missing count node for stat " + statNode);
        return;
    }
    StatsTrack updatedStat;
    synchronized (node) {
        updatedStat = new StatsTrack(new String(node.data));
        updatedStat.setCount(updatedStat.getCount() + diff);
        node.data = updatedStat.toString().getBytes();
    }
    final String quotaNode = Quotas.quotaPath(lastPrefix);
    node = nodes.get(quotaNode);
    if (node == null) {
        LOG.error("Missing count node for quota " + quotaNode);
        return;
    }
    StatsTrack thisStats;
    synchronized (node) {
        thisStats = new StatsTrack(new String(node.data));
    }
    if (thisStats.getCount() < updatedStat.getCount()) {
        LOG.warn("Quota exceeded: " + lastPrefix
                + " count=" + updatedStat.getCount()
                + " limit=" + thisStats.getCount());
    }
}
update the count of this stat datanode
32,244
/**
 * Remove a path from the data tree: detach it from its parent, clean up
 * ephemeral-session bookkeeping, adjust quota counts/bytes, and fire the
 * delete and children-changed watches.
 *
 * @throws KeeperException.NoNodeException if the node or its parent is absent
 */
public void deleteNode(String path, long zxid) throws KeeperException.NoNodeException {
    final int lastSlash = path.lastIndexOf('/');
    final String parentName = path.substring(0, lastSlash);
    final String childName = path.substring(lastSlash + 1);
    final DataNode node = nodes.get(path);
    if (node == null) {
        throw new KeeperException.NoNodeException();
    }
    nodes.remove(path);
    final DataNode parent = nodes.get(parentName);
    if (parent == null) {
        throw new KeeperException.NoNodeException();
    }
    synchronized (parent) {
        parent.removeChild(childName);
        parent.stat.setCversion(parent.stat.getCversion() + 1);
        parent.stat.setPzxid(zxid);
        final long eowner = node.stat.getEphemeralOwner();
        if (eowner != 0) {
            // Drop the path from its owning session's ephemeral set.
            final HashSet<String> sessionNodes = ephemerals.get(eowner);
            if (sessionNodes != null) {
                synchronized (sessionNodes) {
                    sessionNodes.remove(path);
                }
            }
        }
        node.parent = null;
    }
    // If a quota limit node was deleted, drop its path from the quota trie.
    if (parentName.startsWith(procZookeeper) && Quotas.limitNode.equals(childName)) {
        pTrie.deletePath(parentName.substring(quotaZookeeper.length()));
    }
    // Adjust the tracked count/bytes of the nearest quota'd ancestor.
    final String lastPrefix = pTrie.findMaxPrefix(path);
    if (!rootZookeeper.equals(lastPrefix) && !("".equals(lastPrefix))) {
        updateCount(lastPrefix, -1);
        int bytes = 0;
        synchronized (node) {
            bytes = (node.data == null ? 0 : -(node.data.length));
        }
        updateBytes(lastPrefix, bytes);
    }
    if (LOG.isTraceEnabled()) {
        ZooTrace.logTraceMessage(LOG, ZooTrace.EVENT_DELIVERY_TRACE_MASK,
                "dataWatches.triggerWatch " + path);
        ZooTrace.logTraceMessage(LOG, ZooTrace.EVENT_DELIVERY_TRACE_MASK,
                "childWatches.triggerWatch " + parentName);
    }
    // Fire delete watches (avoiding double delivery via 'processed'), then
    // notify the parent that its children changed.
    Set<Watcher> processed = dataWatches.triggerWatch(path, EventType.NodeDeleted);
    childWatches.triggerWatch(path, EventType.NodeDeleted, processed);
    childWatches.triggerWatch(parentName.equals("") ? "/" : parentName,
            EventType.NodeChildrenChanged);
}
remove the path from the datatree
32,245
/**
 * Recursively accumulate the node count and data byte total for the
 * subtree rooted at path into counts.
 */
private void getCounts(String path, Counts counts) {
    final DataNode node = getNode(path);
    if (node == null) {
        return;
    }
    String[] children = null;
    int dataLen;
    synchronized (node) {
        final Set<String> childSet = node.getChildren();
        if (childSet != null) {
            children = childSet.toArray(new String[childSet.size()]);
        }
        dataLen = (node.data == null ? 0 : node.data.length);
    }
    counts.count += 1;
    counts.bytes += dataLen;
    if (children != null) {
        for (String child : children) {
            getCounts(path + "/" + child, counts);
        }
    }
}
this method gets the count of nodes and the bytes under a subtree
32,246
/**
 * Recompute the stat node (count/bytes) for a quota-managed path from the
 * live contents of its subtree.
 */
private void updateQuotaForPath(String path) {
    final Counts counts = new Counts();
    getCounts(path, counts);
    final StatsTrack track = new StatsTrack();
    track.setBytes(counts.bytes);
    track.setCount(counts.count);
    final String statPath = Quotas.quotaZookeeper + path + "/" + Quotas.statNode;
    final DataNode node = getNode(statPath);
    if (node == null) {
        LOG.warn("Missing quota stat node " + statPath);
        return;
    }
    synchronized (node) {
        node.data = track.toString().getBytes();
    }
}
update the quota for the given path
32,247
/**
 * Walk the quota subtree; at each leaf that is a limit node, refresh the
 * corresponding real path's stats and record that path in the path trie.
 */
private void traverseNode(String path) {
    final DataNode node = getNode(path);
    String[] children = null;
    synchronized (node) {
        final Set<String> childSet = node.getChildren();
        if (childSet != null) {
            children = childSet.toArray(new String[childSet.size()]);
        }
    }
    if (children == null) {
        return;
    }
    if (children.length == 0) {
        // Leaf: a limit node marks a quota'd real path.
        final String endString = "/" + Quotas.limitNode;
        if (path.endsWith(endString)) {
            final String realPath = path.substring(
                    Quotas.quotaZookeeper.length(), path.indexOf(endString));
            updateQuotaForPath(realPath);
            this.pTrie.addPath(realPath);
        }
        return;
    }
    for (String child : children) {
        traverseNode(path + "/" + child);
    }
}
This method traverses the quota path, updates the path trie, and sets up the stats for the quota nodes.
32,248
/**
 * Initialize quota tracking: if the quota root exists, traverse it to
 * rebuild the path trie and per-path stats.
 */
private void setupQuota() {
    final DataNode quotaRoot = getNode(Quotas.quotaZookeeper);
    if (quotaRoot != null) {
        traverseNode(Quotas.quotaZookeeper);
    }
}
this method sets up the path trie and sets up stats for quota nodes
32,249
/**
 * Writes a human-readable dump of every session's ephemeral nodes to the
 * given writer: one "0x<sessionId>:" header per session followed by one
 * tab-indented path per line. Each session's path set is iterated under its
 * own monitor, but the overall snapshot is not atomic across sessions.
 */
public void dumpEphemerals ( PrintWriter pwriter ) { Set < Long > keys = ephemerals . keySet ( ) ; pwriter . println ( "Sessions with Ephemerals (" + keys . size ( ) + "):" ) ; for ( long k : keys ) { pwriter . print ( "0x" + Long . toHexString ( k ) ) ; pwriter . println ( ":" ) ; HashSet < String > tmp = ephemerals . get ( k ) ; synchronized ( tmp ) { for ( String path : tmp ) { pwriter . println ( "\t" + path ) ; } } } }
Write a text dump of all the ephemerals in the datatree .
32,250
/**
 * Reads the current counter value stored at this latch's ZooKeeper node.
 * The node's data is interpreted as a single big-endian int.
 *
 * @return the current count
 * @throws KeeperException on ZooKeeper errors
 * @throws InterruptedException if the ZooKeeper call is interrupted
 */
public int getCount ( ) throws InterruptedException , KeeperException { return ByteBuffer . wrap ( m_zk . getData ( m_path , false , null ) ) . getInt ( ) ; }
Returns the current count
32,251
/**
 * Returns true once the distributed count has reached zero. The result is
 * latched locally: after the first observation of count <= 0 no further
 * ZooKeeper reads are performed.
 */
public boolean isCountedDown ( ) throws InterruptedException , KeeperException { if ( countedDown ) return true ; int count = ByteBuffer . wrap ( m_zk . getData ( m_path , false , null ) ) . getInt ( ) ; if ( count > 0 ) return false ; countedDown = true ; return true ; }
Returns if already counted down to zero
32,252
/**
 * Copies every output column of the shared scan into this node's output
 * schema, rewriting each copy's table alias to this node's alias; all other
 * column attributes (table name, column name/alias, expression,
 * differentiator) are preserved.
 */
private void copyTableSchemaFromShared ( ) { for ( SchemaColumn scol : m_sharedScan . getOutputSchema ( ) ) { SchemaColumn copy = new SchemaColumn ( scol . getTableName ( ) , getTableAlias ( ) , scol . getColumnName ( ) , scol . getColumnAlias ( ) , scol . getExpression ( ) , scol . getDifferentiator ( ) ) ; addOutputColumn ( copy ) ; } }
Copy the table schema from the shared part to here . We have to repair the table aliases .
32,253
/**
 * Reconciles this common-table scan's output schema with the true output
 * schemas of the best base plan and (if present) the best recursive plan so
 * that all of them are mutually compatible. Harmonization is applied in
 * both directions (scan <- plans, then plans <- scan); if the base or
 * recursive schema was changed, the corresponding plan's true output schema
 * is recomputed — the trailing getTrueOutputSchema(true) calls exist for
 * that side effect, their return value is deliberately discarded.
 */
public void harmonizeOutputSchema ( ) { boolean changedCurrent ; boolean changedBase ; boolean changedRecursive = false ; NodeSchema currentSchema = getOutputSchema ( ) ; NodeSchema baseSchema = getBestCostBasePlan ( ) . rootPlanGraph . getTrueOutputSchema ( false ) ; NodeSchema recursiveSchema = ( getBestCostRecursivePlan ( ) == null ) ? null : getBestCostRecursivePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; changedCurrent = currentSchema . harmonize ( baseSchema , "Base Query" ) ; if ( recursiveSchema != null ) { boolean changedRec = currentSchema . harmonize ( recursiveSchema , "Recursive Query" ) ; changedCurrent = changedCurrent || changedRec ; } changedBase = baseSchema . harmonize ( currentSchema , "Base Query" ) ; if ( recursiveSchema != null ) { changedRecursive = recursiveSchema . harmonize ( currentSchema , "Recursive Query" ) ; } if ( changedBase ) { getBestCostBasePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; } if ( changedRecursive ) { getBestCostRecursivePlan ( ) . rootPlanGraph . getTrueOutputSchema ( true ) ; } }
We have just planned the base query and perhaps the recursive query . We need to make sure that the output schema of the scan and the output schemas of the base and recursive plans are all compatible .
32,254
/**
 * Completion routine for a future that has just transitioned to done:
 * releases all blocked waiters, runs afterDone(), then dispatches the
 * listener stack. SetFuture chains are unwound iteratively — the outer loop
 * re-enters for the dependent future instead of recursing — to avoid
 * unbounded stack growth on long chains. Ordinary listeners are handed to
 * executeListener with the maskExecutorExceptions policy.
 * NOTE(review): maskExecutorExceptions is captured once from the initially
 * completed future and reused for all dependents in the chain — confirm
 * that is intended rather than per-future.
 */
private static void complete ( AbstractFuture < ? > future ) { boolean maskExecutorExceptions = future . maskExecutorExceptions ; Listener next = null ; outer : while ( true ) { future . releaseWaiters ( ) ; future . afterDone ( ) ; next = future . clearListeners ( next ) ; future = null ; while ( next != null ) { Listener curr = next ; next = next . next ; Runnable task = curr . task ; if ( task instanceof AbstractFuture . SetFuture ) { AbstractFuture . SetFuture < ? > setFuture = ( AbstractFuture . SetFuture ) task ; future = setFuture . owner ; if ( future . value == setFuture ) { Object valueToSet = getFutureValue ( setFuture . future ) ; if ( ATOMIC_HELPER . casValue ( future , setFuture , valueToSet ) ) { continue outer ; } } } else { executeListener ( task , curr . executor , maskExecutorExceptions ) ; } } break ; } }
Unblocks all threads and runs all listeners .
32,255
/**
 * Adds {@code path} to the trie, creating intermediate nodes as needed, and
 * marks the terminal node's property flag. A null path is ignored; a path
 * with no components beyond the leading "/" raises
 * IllegalArgumentException.
 */
public void addPath ( String path ) { if ( path == null ) { return ; } String [ ] pathComponents = path . split ( "/" ) ; TrieNode parent = rootNode ; String part = null ; if ( pathComponents . length <= 1 ) { throw new IllegalArgumentException ( "Invalid path " + path ) ; } for ( int i = 1 ; i < pathComponents . length ; i ++ ) { part = pathComponents [ i ] ; if ( parent . getChild ( part ) == null ) { parent . addChild ( part , new TrieNode ( parent ) ) ; } parent = parent . getChild ( part ) ; } parent . setProperty ( true ) ; }
add a path to the path trie
32,256
/**
 * Removes {@code path} from the trie, if present. A lookup miss along the
 * way causes a silent return (the path was never added). A null path is
 * ignored; a path with no components raises IllegalArgumentException.
 */
public void deletePath(String path) {
    if (path == null) {
        return;
    }
    String[] pathComponents = path.split("/");
    TrieNode parent = rootNode;
    String part = null;
    if (pathComponents.length <= 1) {
        throw new IllegalArgumentException("Invalid path " + path);
    }
    for (int i = 1; i < pathComponents.length; i++) {
        part = pathComponents[i];
        if (parent.getChild(part) == null) {
            // path was never inserted — nothing to delete
            return;
        }
        parent = parent.getChild(part);
        // Was LOG.info(parent): leftover trace output that logged every
        // traversal step at INFO; keep the trace at debug level instead.
        LOG.debug(parent);
    }
    TrieNode realParent = parent.getParent();
    realParent.deleteChild(part);
}
delete a path from the trie
32,257
/**
 * Returns the longest prefix of {@code path} whose terminal trie node has
 * its property flag set (i.e. the deepest registered path covering the
 * input). Returns null for null input, "/" unchanged, and "" when no
 * flagged prefix exists. lastindex records, in the collected-components
 * list, the index of the most recent component whose node is flagged
 * (component i of the path lands at list index i - 1), and the prefix is
 * rebuilt up to and including that component.
 */
public String findMaxPrefix ( String path ) { if ( path == null ) { return null ; } if ( "/" . equals ( path ) ) { return path ; } String [ ] pathComponents = path . split ( "/" ) ; TrieNode parent = rootNode ; List < String > components = new ArrayList < String > ( ) ; if ( pathComponents . length <= 1 ) { throw new IllegalArgumentException ( "Invalid path " + path ) ; } int i = 1 ; String part = null ; StringBuilder sb = new StringBuilder ( ) ; int lastindex = - 1 ; while ( ( i < pathComponents . length ) ) { if ( parent . getChild ( pathComponents [ i ] ) != null ) { part = pathComponents [ i ] ; parent = parent . getChild ( part ) ; components . add ( part ) ; if ( parent . getProperty ( ) ) { lastindex = i - 1 ; } } else { break ; } i ++ ; } for ( int j = 0 ; j < ( lastindex + 1 ) ; j ++ ) { sb . append ( "/" + components . get ( j ) ) ; } return sb . toString ( ) ; }
return the largest prefix for the input path .
32,258
/**
 * Builds a VoltTable from a shorthand schema string. The table name,
 * parenthesized column list, optional primary-key column list, and optional
 * partitioning column are each extracted with this class's precompiled
 * regex patterns; pkey and partition columns may be given either by index
 * (digit-leading token) or by column name. Per-column parsing is delegated
 * to parseColumnShorthand. Throws IllegalArgumentException when no column
 * list is found; defaults: name "T", no pkey, no partition column.
 * NOTE(review): the exact grammar is defined by the m_*Pattern fields
 * declared elsewhere in this class — see the class comment.
 */
public static VoltTable tableFromShorthand ( String schema ) { String name = "T" ; VoltTable . ColumnInfo [ ] columns = null ; Matcher nameMatcher = m_namePattern . matcher ( schema ) ; if ( nameMatcher . find ( ) ) { name = nameMatcher . group ( ) . trim ( ) ; } Matcher columnDataMatcher = m_columnsPattern . matcher ( schema ) ; if ( ! columnDataMatcher . find ( ) ) { throw new IllegalArgumentException ( "No column data found in shorthand" ) ; } String [ ] columnData = columnDataMatcher . group ( ) . trim ( ) . split ( "\\s*,\\s*" ) ; int columnCount = columnData . length ; columns = new VoltTable . ColumnInfo [ columnCount ] ; for ( int i = 0 ; i < columnCount ; i ++ ) { columns [ i ] = parseColumnShorthand ( columnData [ i ] , i ) ; } Matcher pkeyMatcher = m_pkeyPattern . matcher ( schema ) ; int [ ] pkeyIndexes = new int [ 0 ] ; if ( pkeyMatcher . find ( ) ) { String [ ] pkeyColData = pkeyMatcher . group ( ) . trim ( ) . split ( "\\s*,\\s*" ) ; pkeyIndexes = new int [ pkeyColData . length ] ; for ( int pkeyIndex = 0 ; pkeyIndex < pkeyColData . length ; pkeyIndex ++ ) { String pkeyCol = pkeyColData [ pkeyIndex ] ; if ( Character . isDigit ( pkeyCol . charAt ( 0 ) ) ) { int colIndex = Integer . parseInt ( pkeyCol ) ; pkeyIndexes [ pkeyIndex ] = colIndex ; } else { for ( int colIndex = 0 ; colIndex < columnCount ; colIndex ++ ) { if ( columns [ colIndex ] . name . equals ( pkeyCol ) ) { pkeyIndexes [ pkeyIndex ] = colIndex ; break ; } } } } } Matcher partitionMatcher = m_partitionPattern . matcher ( schema ) ; int partitionColumnIndex = - 1 ; if ( partitionMatcher . find ( ) ) { String partitionColStr = partitionMatcher . group ( ) . trim ( ) ; if ( Character . isDigit ( partitionColStr . charAt ( 0 ) ) ) { partitionColumnIndex = Integer . parseInt ( partitionColStr ) ; } else { for ( int colIndex = 0 ; colIndex < columnCount ; colIndex ++ ) { if ( columns [ colIndex ] . name . 
equals ( partitionColStr ) ) { partitionColumnIndex = colIndex ; break ; } } } assert ( partitionColumnIndex != - 1 ) : "Regex match here means there is a partitioning column" ; } VoltTable table = new VoltTable ( new VoltTable . ExtraMetadata ( name , partitionColumnIndex , pkeyIndexes , columns ) , columns , columns . length ) ; return table ; }
Parse the shorthand according to the syntax as described in the class comment .
32,259
/** Exchanges the elements at indices {@code a} and {@code b} of {@code w}. */
private static void swap(Object[] w, int a, int b) {
    final Object tmp = w[b];
    w[b] = w[a];
    w[a] = tmp;
}
Swaps the a th and b th elements of the specified Row array .
32,260
/**
 * Enqueues one row for this partition and, when the queue fills to exactly
 * the minimum batch size, schedules a single drain task that keeps building
 * and loading full batches until fewer than one batch's worth remains.
 * Synchronized so concurrent inserts cannot both observe the trigger size
 * and queue duplicate drain tasks; failures inside the drain are logged,
 * not rethrown.
 * NOTE(review): put() may block while holding this monitor if the queue is
 * bounded — confirm capacity/backpressure expectations.
 */
synchronized void insertRowInTable ( final VoltBulkLoaderRow nextRow ) throws InterruptedException { m_partitionRowQueue . put ( nextRow ) ; if ( m_partitionRowQueue . size ( ) == m_minBatchTriggerSize ) { m_es . execute ( new Runnable ( ) { public void run ( ) { try { while ( m_partitionRowQueue . size ( ) >= m_minBatchTriggerSize ) { loadTable ( buildTable ( ) , m_table ) ; } } catch ( Exception e ) { loaderLog . error ( "Failed to load batch" , e ) ; } } } ) ; } }
Synchronized so that when a single batch is filled up we only queue one task to drain the queue. The task will drain the queue until it doesn't contain a single batch.
32,261
/**
 * Collects the declared fields of {@code startClass} and of every ancestor
 * class, ordered from the most-derived class up to the root of the
 * hierarchy. Fields of all visibilities are included (getDeclaredFields).
 *
 * @param startClass class whose hierarchy is inspected; must not be null
 * @return a mutable list of all declared fields, subclass fields first
 */
public static List<Field> getFields(Class<?> startClass) {
    List<Field> fields = new ArrayList<Field>();
    // Walk the superclass chain iteratively; the original recursed and
    // carried a redundant unchecked cast of its own return value.
    for (Class<?> c = startClass; c != null; c = c.getSuperclass()) {
        fields.addAll(Arrays.asList(c.getDeclaredFields()));
    }
    return fields;
}
Gets all the fields of the class, including those declared by its ancestor classes.
32,262
/**
 * Creates and initializes the singleton ImportManager for this host:
 * registers an ImporterStatsCollector with the stats agent, publishes the
 * instance in m_self, and creates the import processors from the catalog.
 * Synchronized on the class to guard singleton construction.
 */
public static synchronized void initialize ( int myHostId , CatalogContext catalogContext , HostMessenger messenger ) throws BundleException , IOException { ImporterStatsCollector statsCollector = new ImporterStatsCollector ( myHostId ) ; ImportManager em = new ImportManager ( myHostId , messenger , statsCollector ) ; VoltDB . instance ( ) . getStatsAgent ( ) . registerStatsSource ( StatsSelector . IMPORTER , myHostId , statsCollector ) ; m_self = em ; em . create ( catalogContext ) ; }
Create the singleton ImportManager and initialize .
32,263
/**
 * Builds the importer configuration (loading bundles and formatters) from
 * the catalog and (re)starts the importers. Any failure is treated as
 * fatal: the local VoltDB process is crashed deliberately rather than
 * running without import.
 */
private synchronized void create ( CatalogContext catalogContext ) { try { Map < String , ImportConfiguration > newProcessorConfig = loadNewConfigAndBundles ( catalogContext ) ; restartImporters ( newProcessorConfig ) ; } catch ( final Exception e ) { VoltDB . crashLocalVoltDB ( "Error creating import processor" , true , e ) ; } }
This creates a import connector from configuration provided .
32,264
/**
 * Parses the deployment's importer configurations, dropping any entry whose
 * target procedures are missing from the catalog or whose importer bundle
 * cannot be loaded, then rebuilds the formatter-factory cache for the
 * surviving entries. A formatter module that cannot be initialized crashes
 * the local node (import misconfiguration is treated as fatal). Returns the
 * pruned configuration map, ready to load or to diff against the currently
 * running configuration.
 */
private Map < String , ImportConfiguration > loadNewConfigAndBundles ( CatalogContext catalogContext ) { Map < String , ImportConfiguration > newProcessorConfig ; ImportType importElement = catalogContext . getDeployment ( ) . getImport ( ) ; if ( importElement == null || importElement . getConfiguration ( ) . isEmpty ( ) ) { newProcessorConfig = new HashMap < > ( ) ; } else { newProcessorConfig = CatalogUtil . getImportProcessorConfig ( importElement ) ; } Iterator < Map . Entry < String , ImportConfiguration > > iter = newProcessorConfig . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { String configName = iter . next ( ) . getKey ( ) ; ImportConfiguration importConfig = newProcessorConfig . get ( configName ) ; Properties properties = importConfig . getmoduleProperties ( ) ; String importBundleJar = properties . getProperty ( ImportDataProcessor . IMPORT_MODULE ) ; Preconditions . checkNotNull ( importBundleJar , "Import source is undefined or custom import plugin class missing." ) ; if ( ! importConfig . checkProcedures ( catalogContext , importLog , configName ) ) { iter . remove ( ) ; continue ; } boolean bundlePresent = loadImporterBundle ( properties ) ; if ( ! bundlePresent ) { iter . remove ( ) ; } } m_formatterFactories . clear ( ) ; for ( ImportConfiguration config : newProcessorConfig . values ( ) ) { Map < String , FormatterBuilder > formatters = config . getFormatterBuilders ( ) ; if ( formatters != null ) { try { for ( FormatterBuilder builder : formatters . values ( ) ) { String module = builder . getFormatterProperties ( ) . getProperty ( ImportDataProcessor . IMPORT_FORMATTER ) ; AbstractFormatterFactory formatterFactory = m_formatterFactories . get ( module ) ; if ( formatterFactory == null ) { URI moduleURI = URI . create ( module ) ; formatterFactory = m_moduleManager . getService ( moduleURI , AbstractFormatterFactory . class ) ; if ( formatterFactory == null ) { VoltDB . 
crashLocalVoltDB ( "Failed to initialize formatter from: " + module ) ; } m_formatterFactories . put ( module , formatterFactory ) ; } builder . setFormatterFactory ( formatterFactory ) ; } } catch ( Throwable t ) { VoltDB . crashLocalVoltDB ( "Failed to initialize formatter." ) ; } } } importLog . info ( "Final importer count:" + newProcessorConfig . size ( ) ) ; return newProcessorConfig ; }
Parses importer configs and loads the formatters and bundles needed into memory . This is used to generate a new configuration either to load or to compare with existing .
32,265
/**
 * Ensures the importer factory for the given module (IMPORT_MODULE property
 * in "type|jar" form) is loaded in memory. "osgi" modules are resolved via
 * the module manager; anything else is treated as a class name, loaded with
 * this class's classloader and instantiated. Newly loaded factories are
 * registered both by unique importer type (must be non-blank and unique)
 * and by bundle path. Returns false when an OSGi bundle fails to
 * initialize; any other failure is logged and rethrown via
 * Throwables.propagate.
 */
private boolean loadImporterBundle ( Properties moduleProperties ) { String importModuleName = moduleProperties . getProperty ( ImportDataProcessor . IMPORT_MODULE ) ; String attrs [ ] = importModuleName . split ( "\\|" ) ; String bundleJar = attrs [ 1 ] ; String moduleType = attrs [ 0 ] ; try { AbstractImporterFactory importerFactory = m_loadedBundles . get ( bundleJar ) ; if ( importerFactory == null ) { if ( moduleType . equalsIgnoreCase ( "osgi" ) ) { URI bundleURI = URI . create ( bundleJar ) ; importerFactory = m_moduleManager . getService ( bundleURI , AbstractImporterFactory . class ) ; if ( importerFactory == null ) { importLog . error ( "Failed to initialize importer from: " + bundleJar ) ; return false ; } } else { Class < ? > reference = this . getClass ( ) . getClassLoader ( ) . loadClass ( bundleJar ) ; if ( reference == null ) { importLog . error ( "Failed to initialize importer from: " + bundleJar ) ; return false ; } importerFactory = ( AbstractImporterFactory ) reference . newInstance ( ) ; } String importerType = importerFactory . getTypeName ( ) ; if ( importerType == null || importerType . trim ( ) . isEmpty ( ) ) { throw new RuntimeException ( "Importer must implement and return a valid unique name." ) ; } Preconditions . checkState ( ! m_importersByType . containsKey ( importerType ) , "Importer must implement and return a valid unique name: " + importerType ) ; m_importersByType . put ( importerType , importerFactory ) ; m_loadedBundles . put ( bundleJar , importerFactory ) ; } } catch ( Throwable t ) { importLog . error ( "Failed to configure import handler for " + bundleJar , t ) ; Throwables . propagate ( t ) ; } return true ; }
Checks if the module for the importer has been loaded in memory. If the bundle doesn't exist, it loads one and updates the mapping records of the bundles.
32,266
/**
 * Prints an exception message to stdout, but stops after
 * MAX_CAUGHT_EXCEPTION_MESSAGES messages (announcing the cutoff exactly
 * once) so a flood of identical errors cannot swamp the console.
 * NOTE(review): the shared counter is incremented without synchronization —
 * acceptable for a best-effort limit, racy under concurrent callers.
 */
protected static void printCaughtException ( String exceptionMessage ) { if ( ++ countCaughtExceptions <= MAX_CAUGHT_EXCEPTION_MESSAGES ) { System . out . println ( exceptionMessage ) ; } if ( countCaughtExceptions == MAX_CAUGHT_EXCEPTION_MESSAGES ) { System . out . println ( "In NonVoltDBBackend, reached limit of " + MAX_CAUGHT_EXCEPTION_MESSAGES + " exception messages to be printed." ) ; } }
Print a message about an Exception that was caught ; but limit the number of such print messages so that the console is not swamped by them .
32,267
/**
 * Returns all column names of {@code tableName} (matched lower-case), in
 * DDL order, via JDBC metadata. On SQLException the error is reported
 * through printCaughtException and whatever was collected so far (possibly
 * an empty list) is returned.
 */
protected List<String> getAllColumns(String tableName) {
    List<String> columns = new ArrayList<String>();
    // try-with-resources: the original leaked the ResultSet.
    try (ResultSet rs = dbconn.getMetaData().getColumns(
            null, null, tableName.toLowerCase(), null)) {
        while (rs.next()) {
            // Column 4 of the metadata result set is COLUMN_NAME.
            columns.add(rs.getString(4));
        }
    } catch (SQLException e) {
        printCaughtException("In NonVoltDBBackend.getAllColumns, caught SQLException: " + e);
    }
    return columns;
}
Returns all column names for the specified table in the order defined in the DDL .
32,268
/**
 * Returns all primary-key column names of {@code tableName} (matched
 * lower-case), in DDL order, via JDBC metadata. On SQLException the error
 * is reported through printCaughtException and whatever was collected so
 * far (possibly an empty list) is returned.
 */
protected List<String> getPrimaryKeys(String tableName) {
    List<String> pkCols = new ArrayList<String>();
    // try-with-resources: the original leaked the ResultSet.
    try (ResultSet rs = dbconn.getMetaData().getPrimaryKeys(
            null, null, tableName.toLowerCase())) {
        while (rs.next()) {
            // Column 4 of the metadata result set is COLUMN_NAME.
            pkCols.add(rs.getString(4));
        }
    } catch (SQLException e) {
        printCaughtException("In NonVoltDBBackend.getPrimaryKeys, caught SQLException: " + e);
    }
    return pkCols;
}
Returns all primary key column names for the specified table in the order defined in the DDL .
32,269
/**
 * Returns the column names of {@code tableName} that are not part of its
 * primary key, preserving DDL order.
 */
protected List<String> getNonPrimaryKeyColumns(String tableName) {
    List<String> nonPkColumns = getAllColumns(tableName);
    List<String> primaryKeys = getPrimaryKeys(tableName);
    nonPkColumns.removeIf(primaryKeys::contains);
    return nonPkColumns;
}
Returns all non - primary - key column names for the specified table in the order defined in the DDL .
32,270
/**
 * Applies each of the given QueryTransformers to the query in turn, feeding
 * the output of one transformation into the next, and returns the final
 * result.
 */
protected String transformQuery(String query, QueryTransformer... qts) {
    String transformed = query;
    for (int i = 0; i < qts.length; i++) {
        transformed = transformQuery(transformed, qts[i]);
    }
    return transformed;
}
Calls the transformQuery method above multiple times for each specified QueryTransformer .
32,271
/**
 * Appends the original and modified SQL (one line each) to the transformed
 * SQL output file, but only when that writer is configured and the SQL was
 * actually changed by a transformation. IOExceptions are reported via
 * printCaughtException rather than propagated.
 */
static protected void printTransformedSql ( String originalSql , String modifiedSql ) { if ( transformedSqlFileWriter != null && ! originalSql . equals ( modifiedSql ) ) { try { transformedSqlFileWriter . write ( "original SQL: " + originalSql + "\n" ) ; transformedSqlFileWriter . write ( "modified SQL: " + modifiedSql + "\n" ) ; } catch ( IOException e ) { printCaughtException ( "Caught IOException:\n " + e + "\noriginal SQL: " + originalSql + "\nmodified SQL: " + modifiedSql ) ; } } }
Prints the original and modified SQL statements to the Transformed SQL output file, assuming that file is defined; and only if the original and modified SQL are not the same, i.e. only if some transformation has indeed taken place.
32,272
/**
 * Wraps {@code part} as a capturing group (tagged with the given capture
 * label) or as a plain non-capturing group. If the part already carries
 * GROUP or CAPTURE flags it is first re-wrapped in a new element so the
 * existing flags are not clobbered.
 */
private static SQLPatternPart makeGroup ( boolean capture , String captureLabel , SQLPatternPart part ) { boolean alreadyGroup = ( part . m_flags & ( SQLPatternFactory . GROUP | SQLPatternFactory . CAPTURE ) ) != 0 ; SQLPatternPart retPart = alreadyGroup ? new SQLPatternPartElement ( part ) : part ; if ( capture ) { retPart . m_flags |= SQLPatternFactory . CAPTURE ; retPart . setCaptureLabel ( captureLabel ) ; } else { retPart . m_flags |= SQLPatternFactory . GROUP ; } return retPart ; }
Make a capturing or non - capturing group
32,273
/**
 * Creates a fresh in-memory HSQLDB instance (unique name per call), forces
 * GMT as the JVM default time zone, enables case-insensitive identifiers,
 * attaches the given parameter-state manager, and wraps the session in an
 * HSQLInterface.
 *
 * @throws HsqlException if the session/parser cannot be initialized
 */
public static HSQLInterface loadHsqldb(ParameterStateManager psMgr) {
    // HSQLDB date/time handling is sensitive to the JVM default zone; pin it.
    TimeZone.setDefault(TimeZone.getTimeZone("GMT+0"));
    String name = "hsqldbinstance-" + String.valueOf(instanceId) + "-"
            + String.valueOf(System.currentTimeMillis());
    instanceId++;
    HsqlProperties props = new HsqlProperties();
    try {
        Session sessionProxy = DatabaseManager.newSession(
                DatabaseURL.S_MEM, name, "SA", "", props, 0);
        sessionProxy.executeDirectStatement("SET IGNORECASE TRUE;");
        sessionProxy.setParameterStateManager(psMgr);
        return new HSQLInterface(sessionProxy);
    }
    catch (HsqlException caught) {
        // The logger records the full stack trace; the old extra
        // printStackTrace() merely duplicated it on stderr and is removed.
        m_logger.warn("Unexpected error initializing the SQL parser", caught);
        throw caught;
    }
}
Load up an HSQLDB in - memory instance .
32,274
/**
 * Executes one DDL statement against the in-memory schema and returns a
 * VoltXMLDiff describing how the affected table's XML changed. The
 * pre-parsed stmtInfo determines the single table the statement should
 * touch (index statements are mapped back to their table). A statement
 * expected to fail — unknown target without IF EXISTS, or unparseable
 * stmtInfo — raises HSQLParseException after the engine run. For CASCADE
 * drops, any table that disappeared is added to the diff's removed
 * elements. lastSchema is updated with a duplicate of the affected table's
 * new XML; element ordering info in the diff is deliberately cleared.
 */
public VoltXMLDiff runDDLCommandAndDiff ( HSQLDDLInfo stmtInfo , String ddl ) throws HSQLParseException { String expectedTableAffected = null ; boolean expectFailure = false ; Set < String > existingTableNames = null ; if ( stmtInfo != null ) { if ( stmtInfo . cascade ) { existingTableNames = getTableNames ( ) ; } if ( stmtInfo . noun == HSQLDDLInfo . Noun . INDEX ) { if ( stmtInfo . verb == HSQLDDLInfo . Verb . CREATE ) { expectedTableAffected = stmtInfo . secondName ; } else { expectedTableAffected = tableNameForIndexName ( stmtInfo . name ) ; } } else { expectedTableAffected = stmtInfo . name ; } expectFailure = ( expectedTableAffected == null ) && ! stmtInfo . ifexists ; } else { expectFailure = true ; } runDDLCommand ( ddl ) ; if ( expectFailure ) { throw new HSQLParseException ( "Unable to plan statement due to VoltDB DDL pre-processing error" ) ; } assert ( stmtInfo != null ) ; VoltXMLElement tableXMLNew = null , tableXMLOld = null ; if ( expectedTableAffected != null ) { tableXMLNew = getXMLForTable ( expectedTableAffected ) ; tableXMLOld = lastSchema . get ( expectedTableAffected ) ; } if ( tableXMLNew == null ) { tableXMLNew = emptySchema ; } if ( tableXMLOld == null ) { tableXMLOld = emptySchema ; } VoltXMLDiff diff = VoltXMLElement . computeDiff ( tableXMLOld , tableXMLNew ) ; if ( stmtInfo . cascade ) { Set < String > finalTableNames = getTableNames ( ) ; for ( String tableName : existingTableNames ) { if ( ! finalTableNames . contains ( tableName ) ) { tableName = tableName . toLowerCase ( ) ; tableXMLOld = lastSchema . get ( tableName ) . children . get ( 0 ) ; lastSchema . remove ( tableName ) ; if ( tableName . equals ( expectedTableAffected ) ) { continue ; } diff . m_removedElements . add ( tableXMLOld ) ; } } } diff . m_elementOrder . clear ( ) ; if ( expectedTableAffected != null ) { lastSchema . put ( expectedTableAffected , tableXMLNew . duplicate ( ) ) ; } return diff ; }
Modify the current schema with a SQL DDL command and get the diff which represents the changes .
32,275
/**
 * Executes a single DDL statement against the current schema, first
 * clearing any session-local tables. An error reported by HSQL is rethrown
 * as HSQLParseException carrying the engine's message.
 */
public void runDDLCommand ( String ddl ) throws HSQLParseException { sessionProxy . clearLocalTables ( ) ; Result result = sessionProxy . executeDirectStatement ( ddl ) ; if ( result . hasError ( ) ) { throw new HSQLParseException ( result . getMainString ( ) ) ; } }
Modify the current schema with a SQL DDL command .
32,276
/**
 * Recursively rewrites every expression subtree that is really an IN-list
 * (row vs. table/subquery/value comparison) into the simplified
 * "operation optype=in" form expected by AbstractParsedStmt. Detection is
 * delegated to doesExpressionReallyMeanIn, rewriting to inFixup; children
 * are visited after the (possibly rewritten) parent.
 */
private void fixupInStatementExpressions ( VoltXMLElement expr ) throws HSQLParseException { if ( doesExpressionReallyMeanIn ( expr ) ) { inFixup ( expr ) ; } for ( VoltXMLElement child : expr . children ) { fixupInStatementExpressions ( child ) ; } }
Recursively find all in - lists subquery row comparisons found in the XML and munge them into the simpler thing we want to pass to the AbstractParsedStmt .
32,277
/**
 * Rewrites one element that represents an IN-list comparison into an
 * "operation optype=in" node with exactly two children: the row being
 * tested and the list — either a flattened "vector" built from the table
 * rows' single children, a table subquery, or a single value element.
 * Exactly one of table/tablesubquery/value is expected among the original
 * children (asserted).
 */
private void inFixup ( VoltXMLElement inElement ) { inElement . name = "operation" ; inElement . attributes . put ( "optype" , "in" ) ; VoltXMLElement rowElem = null ; VoltXMLElement tableElem = null ; VoltXMLElement subqueryElem = null ; VoltXMLElement valueElem = null ; for ( VoltXMLElement child : inElement . children ) { if ( child . name . equals ( "row" ) ) { rowElem = child ; } else if ( child . name . equals ( "table" ) ) { tableElem = child ; } else if ( child . name . equals ( "tablesubquery" ) ) { subqueryElem = child ; } else if ( child . name . equals ( "value" ) ) { valueElem = child ; } } VoltXMLElement inlist ; if ( tableElem != null ) { inlist = new VoltXMLElement ( "vector" ) ; for ( VoltXMLElement child : tableElem . children ) { assert ( child . name . equals ( "row" ) ) ; assert ( child . children . size ( ) == 1 ) ; inlist . children . addAll ( child . children ) ; } } else if ( subqueryElem != null ) { inlist = subqueryElem ; } else { assert valueElem != null ; inlist = valueElem ; } assert ( rowElem != null ) ; assert ( inlist != null ) ; inElement . children . clear ( ) ; inElement . children . add ( rowElem ) ; inElement . children . add ( inlist ) ; }
Take an equality - test expression that represents in - list and munge it into the simpler thing we want to output to the AbstractParsedStmt for its AbstractExpression classes .
32,278
/**
 * Debug-only helper: prints the current schema name and the names of all
 * its tables to stdout. A failure to resolve the schema name is printed but
 * does not stop the table listing.
 */
@ SuppressWarnings ( "unused" ) private void printTables ( ) { try { String schemaName = sessionProxy . getSchemaName ( null ) ; System . out . println ( "*** Tables For Schema: " + schemaName + " ***" ) ; } catch ( HsqlException caught ) { caught . printStackTrace ( ) ; } HashMappedList hsqlTables = getHSQLTables ( ) ; for ( int i = 0 ; i < hsqlTables . size ( ) ; i ++ ) { Table table = ( Table ) hsqlTables . get ( i ) ; System . out . println ( table . getName ( ) . name ) ; } }
Debug - only method that prints out the names of all tables in the current schema .
32,279
/**
 * Returns a copy of the empty-schema XML with the named table's serialized
 * XML appended as its only child. The table name is matched
 * case-insensitively against the current HSQL tables; returns null if no
 * such table exists.
 */
public VoltXMLElement getXMLForTable ( String tableName ) throws HSQLParseException { VoltXMLElement xml = emptySchema . duplicate ( ) ; HashMappedList hsqlTables = getHSQLTables ( ) ; for ( int i = 0 ; i < hsqlTables . size ( ) ; i ++ ) { Table table = ( Table ) hsqlTables . get ( i ) ; String candidateTableName = table . getName ( ) . name ; if ( candidateTableName . equalsIgnoreCase ( tableName ) ) { VoltXMLElement vxmle = table . voltGetTableXML ( sessionProxy ) ; assert ( vxmle != null ) ; xml . children . add ( vxmle ) ; return xml ; } } return null ; }
Get a serialized XML representation of a particular table .
32,280
/**
 * Registers commit trackers and offset bookkeeping for any newly assigned
 * topic-partitions; partitions already tracked are skipped. The tracker
 * flavor depends on the commit policy (TIME with a positive trigger value
 * -> SimpleTracker, otherwise DurableTracker). The starting offset is
 * seeded from Kafka's committed offset when available (-1 otherwise); a
 * failure to read it is logged and leaves the seed at -1. The tracker and
 * last-committed-offset maps are replaced copy-on-write, and only when at
 * least one new partition was actually added.
 */
private void calculateTrackers ( Collection < TopicPartition > partitions ) { Map < TopicPartition , CommitTracker > trackers = new HashMap < > ( ) ; trackers . putAll ( m_trackerMap . get ( ) ) ; Map < TopicPartition , AtomicLong > lastCommittedOffSets = new HashMap < > ( ) ; lastCommittedOffSets . putAll ( m_lastCommittedOffSets . get ( ) ) ; boolean newTopicPartition = false ; for ( TopicPartition partition : partitions ) { if ( m_trackerMap . get ( ) . get ( partition ) != null ) { continue ; } newTopicPartition = true ; long startOffset = - 1L ; CommitTracker commitTracker = null ; if ( m_config . getCommitPolicy ( ) == KafkaCommitPolicy . TIME && m_config . getTriggerValue ( ) > 0 ) { commitTracker = new SimpleTracker ( ) ; } else { commitTracker = new DurableTracker ( KafkaConstants . IMPORT_GAP_LEAD , partition . topic ( ) , partition . partition ( ) , m_config . getGroupId ( ) ) ; } trackers . put ( partition , commitTracker ) ; try { OffsetAndMetadata offsetAndMetaData = m_consumer . committed ( partition ) ; startOffset = offsetAndMetaData != null ? offsetAndMetaData . offset ( ) : - 1L ; if ( startOffset > - 1L ) { commitTracker . resetTo ( startOffset ) ; } } catch ( KafkaException e ) { LOGGER . error ( "Failed to read committed offsets for group " + m_config . getGroupId ( ) + partition + " " + e . getMessage ( ) ) ; } lastCommittedOffSets . put ( partition , new AtomicLong ( startOffset ) ) ; m_pauseOffsets . put ( partition , new AtomicLong ( - 1 ) ) ; m_workTrackers . put ( partition , new PendingWorkTracker ( ) ) ; if ( LOGGER . isDebugEnabled ( ) ) { LOGGER . debug ( "Starting offset for group:" + m_config . getGroupId ( ) + ":" + startOffset + " partition:" + partition ) ; } } if ( newTopicPartition ) { m_trackerMap . set ( trackers ) ; m_lastCommittedOffSets . set ( lastCommittedOffSets ) ; } }
add trackers for new topic - partition in this importer
32,281
/**
 * Repositions the consumer for each given partition to its last committed
 * offset so the next poll resumes from the durable point. Partitions with
 * no committed offset (absent or -1) are skipped, as are partitions already
 * seeked to the same offset — m_lastSeekedOffSets caches the last seek
 * target to avoid redundant seeks.
 */
private void seek ( List < TopicPartition > seekList ) { for ( TopicPartition tp : seekList ) { AtomicLong lastCommittedOffset = m_lastCommittedOffSets . get ( ) . get ( tp ) ; if ( lastCommittedOffset != null && lastCommittedOffset . get ( ) > - 1L ) { AtomicLong lastSeeked = m_lastSeekedOffSets . get ( tp ) ; if ( lastSeeked != null && lastSeeked . get ( ) == lastCommittedOffset . get ( ) ) { continue ; } m_consumer . seek ( tp , lastCommittedOffset . longValue ( ) ) ; m_lastSeekedOffSets . put ( tp , new AtomicLong ( lastCommittedOffset . get ( ) ) ) ; if ( LOGGER . isDebugEnabled ( ) ) { LOGGER . debug ( "Moves offset for group " + m_config . getGroupId ( ) + " -" + tp + " to " + lastCommittedOffset ) ; } } } }
Move offsets to correct positions for next poll
32,282
/**
 * Formats the absolute value of {@code value} left-padded with zeros to
 * {@code precision} digits. If the digit string is longer than
 * {@code precision}, the leading {@code precision} digits are dropped
 * (keeping the tail) before padding. Finally, if {@code maxSize} is smaller
 * than {@code precision}, the result is truncated on the right to
 * {@code maxSize} characters. Negative values are treated as positive.
 */
public static String toZeroPaddedString(long value, int precision, int maxSize) {
    StringBuffer result = new StringBuffer();
    String digits = Long.toString(value < 0 ? -value : value);
    if (digits.length() > precision) {
        // too many digits: keep only the tail beyond the first 'precision'
        digits = digits.substring(precision);
    }
    int padCount = precision - digits.length();
    while (padCount-- > 0) {
        result.append('0');
    }
    result.append(digits);
    if (maxSize < precision) {
        result.setLength(maxSize);
    }
    return result.toString();
}
If necessary adds zeros to the beginning of a value so that the total length matches the given precision otherwise trims the right digits . Then if maxSize is smaller than precision trims the right digits to maxSize . Negative values are treated as positive
32,283
/**
 * Returns a lower-cased copy of {@code source} in which every
 * non-alphanumeric character — and a digit in the first position — is
 * replaced by {@code substitute}.
 */
public static String toLowerSubset(String source, char substitute) {
    int length = source.length();
    StringBuffer result = new StringBuffer(length);
    for (int i = 0; i < length; i++) {
        char ch = source.charAt(i);
        // a leading digit is not a valid identifier start, so substitute it
        if (!Character.isLetterOrDigit(ch) || (i == 0 && Character.isDigit(ch))) {
            result.append(substitute);
        } else {
            result.append(Character.toLowerCase(ch));
        }
    }
    return result.toString();
}
Returns a string with non alphanumeric chars converted to the substitute character . A digit first character is also converted . By sqlbob
32,284
/**
 * Builds a brace-bracketed, comma-separated list from the elements of the
 * given array (any array type, including primitives), e.g. "{1,2,3}".
 *
 * @param array an object whose class is an array class
 * @return the formatted string; "{}" for an empty array
 * @throws IllegalArgumentException if {@code array} is not an array
 */
public static String arrayToString(Object array) {
    int length = Array.getLength(array);
    // StringBuilder instead of the original synchronized StringBuffer;
    // separator is emitted before each element but the first, avoiding the
    // per-iteration last-index comparison.
    StringBuilder sb = new StringBuilder(2 * (length + 1));
    sb.append('{');
    for (int i = 0; i < length; i++) {
        if (i > 0) {
            sb.append(',');
        }
        sb.append(Array.get(array, i));
    }
    sb.append('}');
    return sb.toString();
}
Builds a bracketed CSV list from the array
32,285
/**
 * Appends {@code s1}, the separator, {@code s2} and the terminator, in that
 * order, to the given buffer.
 */
public static void appendPair(StringBuffer b, String s1, String s2,
                              String separator, String terminator) {
    b.append(s1).append(separator).append(s2).append(terminator);
}
Appends a pair of string to the string buffer using the separator between and terminator at the end
32,286
/**
 * Returns the length of {@code s} once trailing spaces are excluded —
 * i.e. the size of the longest prefix with no trailing ' ' characters.
 */
public static int rightTrimSize(String s) {
    int end = s.length();
    while (end > 0 && s.charAt(end - 1) == ' ') {
        end--;
    }
    return end;
}
Returns the size of substring that does not contain any trailing spaces
32,287
/**
 * Returns the index of the first non-space character at or after
 * {@code start}; returns the string length if only spaces remain.
 */
public static int skipSpaces(String s, int start) {
    int index = start;
    int limit = s.length();
    while (index < limit && s.charAt(index) == ' ') {
        index++;
    }
    return index;
}
Skips any spaces at or after start and returns the index of first non - space character ;
32,288
/**
 * Splits {@code s} around occurrences of {@code separator}. If the
 * separator never occurs, the whole string is returned as the single
 * element; a trailing separator yields a trailing empty element.
 */
public static String[] split(String s, String separator) {
    HsqlArrayList parts = new HsqlArrayList();
    int from = 0;
    while (true) {
        int at = s.indexOf(separator, from);
        if (at == -1) {
            parts.add(s.substring(from));
            break;
        }
        parts.add(s.substring(from, at));
        from = at + separator.length();
    }
    return (String[]) parts.toArray(new String[parts.size()]);
}
Splits the string into an array using the separator . If separator is not found in the string the whole string is returned in the array .
32,289
/**
 * Appends the export-statistics columns to the base schema supplied by the
 * superclass. The column order is part of an external contract —
 * cluster.py and checkstats.py parse these positions — so keep those
 * scripts in sync if this list changes.
 */
protected void populateColumnSchema ( ArrayList < ColumnInfo > columns ) { super . populateColumnSchema ( columns ) ; columns . add ( new ColumnInfo ( VoltSystemProcedure . CNAME_SITE_ID , VoltSystemProcedure . CTYPE_ID ) ) ; columns . add ( new ColumnInfo ( Columns . PARTITION_ID , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . SOURCE_NAME , VoltType . STRING ) ) ; columns . add ( new ColumnInfo ( Columns . EXPORT_TARGET , VoltType . STRING ) ) ; columns . add ( new ColumnInfo ( Columns . ACTIVE , VoltType . STRING ) ) ; columns . add ( new ColumnInfo ( Columns . TUPLE_COUNT , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . TUPLE_PENDING , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . LAST_QUEUED_TIMESTAMP , VoltType . TIMESTAMP ) ) ; columns . add ( new ColumnInfo ( Columns . LAST_ACKED_TIMESTAMP , VoltType . TIMESTAMP ) ) ; columns . add ( new ColumnInfo ( Columns . AVERAGE_LATENCY , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . MAX_LATENCY , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . QUEUE_GAP , VoltType . BIGINT ) ) ; columns . add ( new ColumnInfo ( Columns . STATUS , VoltType . STRING ) ) ; }
Update cluster.py and checkstats.py if the order of these columns is changed.
32,290
/**
 * Accepts one new item offered by the SPI: buffers it, then attempts to
 * release buffered reads up to the given handle to the mailbox.
 */
private void offerInternal ( Mailbox mailbox , Item item , long handle ) { m_bufferedReads . add ( item ) ; releaseBufferedReads ( mailbox , handle ) ; }
SPI offers a new message .
32,291
/**
 * Returns the total byte footprint of this stream's data: in-memory blocks
 * plus the persisted deque contents, excluding the per-object PBD header
 * bytes from the persisted portion.
 *
 * @return total data size in bytes
 * @throws IOException if the persistent reader fails
 */
public long sizeInBytes ( ) throws IOException {
    long inMemoryBytes = 0 ;
    for ( StreamBlock block : m_memoryDeque ) {
        inMemoryBytes += block . totalSize ( ) ;
    }
    long persistedBytes = m_reader . sizeInBytes ( ) ;
    // Headers are bookkeeping, not payload, so discount one per persisted object.
    long headerOverhead = StreamBlock . HEADER_SIZE * m_reader . getNumObjects ( ) ;
    return inMemoryBytes + persistedBytes - headerOverhead ;
}
Only used in tests; should be removed.
32,292
/**
 * Truncates the persisted stream so that no row with a sequence number
 * greater than {@code truncationSeqNo} survives, then reopens the
 * PersistentBinaryDeque and its reader.
 *
 * See the PBD segment layout at the beginning of this file for the header
 * fields parsed below.
 *
 * Preconditions: all in-memory blocks must already be drained
 * (asserted below).
 *
 * @param truncationSeqNo highest sequence number to retain
 * @throws IOException on PBD parse/close/reopen failure
 */
public void truncateToSequenceNumber ( final long truncationSeqNo ) throws IOException {
    assert ( m_memoryDeque . isEmpty ( ) ) ;
    m_persistentDeque . parseAndTruncate ( new BinaryDequeTruncator ( ) {
        public TruncatorResponse parse ( BBContainer bbc ) {
            ByteBuffer b = bbc . b ( ) ;
            // PBD segments are little-endian; restore the caller's byte order on exit.
            ByteOrder endianness = b . order ( ) ;
            b . order ( ByteOrder . LITTLE_ENDIAN ) ;
            try {
                final long startSequenceNumber = b . getLong ( ) ;
                // Whole segment is beyond the retention point: drop it entirely.
                if ( startSequenceNumber > truncationSeqNo ) {
                    return PersistentBinaryDeque . fullTruncateResponse ( ) ;
                }
                // Read (and ignore) the committed sequence number; consumed only
                // to advance the cursor past the header field.
                final long committedSequenceNumber = b . getLong ( ) ;
                final int tupleCountPos = b . position ( ) ;
                final int tupleCount = b . getInt ( ) ;
                final long lastSequenceNumber = startSequenceNumber + tupleCount - 1 ;
                // Whole segment is at or before the retention point: keep as-is.
                if ( lastSequenceNumber <= truncationSeqNo ) {
                    return null ;
                }
                // Skip one more 8-byte header field before the row data.
                b . getLong ( ) ;
                // Walk rows until the first one past truncationSeqNo, then cut
                // the buffer there and patch the stored tuple count.
                int offset = 0 ;
                while ( b . hasRemaining ( ) ) {
                    if ( startSequenceNumber + offset > truncationSeqNo ) {
                        b . limit ( b . position ( ) ) ;
                        // NOTE(review): tupleCount is rewritten to offset - 1, which
                        // looks off by one relative to the rows retained before this
                        // position -- confirm against the PBD segment layout.
                        b . putInt ( tupleCountPos , offset - 1 ) ;
                        b . position ( 0 ) ;
                        return new ByteBufferTruncatorResponse ( b ) ;
                    }
                    offset ++ ;
                    final int rowLength = b . getInt ( ) ;
                    b . position ( b . position ( ) + rowLength ) ;
                }
                // Ran off the end without crossing the boundary: keep the segment.
                return null ;
            } finally {
                b . order ( endianness ) ;
            }
        }
    } ) ;
    // Recreate the deque so the reader sees the truncated contents, using the
    // current catalog's schema for this stream.
    m_persistentDeque . close ( ) ;
    CatalogContext catalogContext = VoltDB . instance ( ) . getCatalogContext ( ) ;
    Table streamTable = VoltDB . instance ( ) . getCatalogContext ( ) . database . getTables ( ) . get ( m_streamName ) ;
    StreamTableSchemaSerializer ds = new StreamTableSchemaSerializer ( streamTable , m_streamName , catalogContext . m_genId ) ;
    m_persistentDeque = new PersistentBinaryDeque ( m_nonce , ds , new VoltFile ( m_path ) , exportLog , ! DISABLE_COMPRESSION ) ;
    m_reader = m_persistentDeque . openForRead ( m_nonce ) ;
    exportLog . info ( "After truncate, PBD size is " + ( m_reader . sizeInBytes ( ) - ( 8 * m_reader . getNumObjects ( ) ) ) ) ;
}
See the PBD segment layout at the beginning of this file.
32,293
/**
 * Sets the bit at {@code pos} to 1, growing the map as needed.
 * Bits are numbered from the most significant bit of word 0.
 *
 * @param pos zero-based bit position
 * @return the previous value of that bit (0 or 1)
 */
public int set ( int pos ) {
    // Grow the backing store until the requested position fits.
    while ( pos >= capacity ) {
        doubleCapacity ( ) ;
    }
    // Track the highest position touched so far.
    if ( pos >= limitPos ) {
        limitPos = pos + 1 ;
    }
    int wordIndex = pos >> 5 ;                     // 32 bits per word
    int bit = 0x80000000 >>> ( pos & 0x1F ) ;      // MSB-first bit within the word
    int previous = map [ wordIndex ] ;
    map [ wordIndex ] = previous | bit ;
    return ( previous & bit ) == 0 ? 0 : 1 ;
}
Sets the bit at pos and returns the old value of that bit.
32,294
/**
 * ANDs {@code count} bits taken from {@code source} (starting at its MSB)
 * into {@code map}, starting at bit position {@code pos}. Bits outside the
 * affected window are left untouched. Writes past the first byte spill into
 * the following byte when the window crosses a byte boundary.
 *
 * @param map    the bitmap to modify in place
 * @param pos    starting bit position (MSB of byte 0 is position 0)
 * @param source the bits to AND in, MSB first
 * @param count  number of bits from source to apply
 */
public static void and ( byte [ ] map , int pos , byte source , int count ) {
    int bitOffset = pos & 0x07 ;
    int byteIndex = pos / 8 ;
    // Source bits aligned to their position within the first target byte.
    int srcBits = ( source & 0xff ) >>> bitOffset ;
    int window = 0xff >> bitOffset ;
    if ( count < 8 ) {
        // Keep only the leading `count` bits of the window.
        window = ( window >>> ( 8 - count ) ) << ( 8 - count ) ;
    }
    srcBits &= window ;
    if ( byteIndex >= map . length ) {
        return ;
    }
    // Preserve bits outside the window; AND the source bits inside it.
    int keep = ~ window ;
    int original = map [ byteIndex ] ;
    map [ byteIndex ] = ( byte ) ( ( original & keep ) | ( original & srcBits ) ) ;
    if ( bitOffset == 0 ) {
        return ;
    }
    // Handle the bits that spill into the next byte.
    int spill = 8 - bitOffset ;
    if ( count > spill ) {
        int srcBits2 = ( ( source & 0xff ) << 8 ) >>> spill ;
        int keep2 = ~ ( 0xff00 >>> spill ) ;
        int original2 = map [ byteIndex + 1 ] ;
        map [ byteIndex + 1 ] = ( byte ) ( ( original2 & keep2 ) | ( original2 & srcBits2 ) ) ;
    }
}
AND count bits from source with map contents starting at pos
32,295
/**
 * ORs bits taken from {@code source} (starting at its MSB) into {@code map},
 * starting at bit position {@code pos}. When the window crosses a byte
 * boundary, the spill into the next byte is applied only if
 * {@code count} exceeds the bits remaining in the first byte.
 *
 * @param map    the bitmap to modify in place
 * @param pos    starting bit position (MSB of byte 0 is position 0)
 * @param source the bits to OR in, MSB first
 * @param count  number of bits from source to apply
 */
public static void or ( byte [ ] map , int pos , byte source , int count ) {
    int bitOffset = pos & 0x07 ;
    int byteIndex = pos / 8 ;
    if ( byteIndex >= map . length ) {
        return ;
    }
    // Source bits aligned to their position within the first target byte.
    int firstBits = ( source & 0xff ) >>> bitOffset ;
    map [ byteIndex ] |= ( byte ) firstBits ;
    if ( bitOffset == 0 ) {
        return ;
    }
    // Bits that spill past the first byte go into the next one.
    int spill = 8 - bitOffset ;
    if ( count > spill ) {
        int secondBits = ( ( source & 0xff ) << 8 ) >>> spill ;
        map [ byteIndex + 1 ] |= ( byte ) secondBits ;
    }
}
OR count bits from source with map contents starting at pos
32,296
/**
 * Appends a (key, value) pair without maintaining sort order, growing the
 * backing arrays if necessary.
 *
 * Clears the {@code sorted} flag when the new pair breaks the current order
 * of the active sort column.
 *
 * @param key   the key to append
 * @param value the value to append
 * @return false if the table is full and fixed-size; true otherwise
 */
public synchronized boolean addUnsorted ( int key , int value ) {
    if ( count == capacity ) {
        if ( fixedSize ) {
            return false ;
        } else {
            doubleCapacity ( ) ;
        }
    }
    if ( sorted && count != 0 ) {
        if ( sortOnValues ) {
            if ( value < values [ count - 1 ] ) {
                sorted = false ;
            }
        } else {
            // BUG FIX: when sorting on keys, the NEW KEY must be compared with
            // the last key (this previously compared `value`, so the sorted
            // flag tracked the wrong column and stale "sorted" state could
            // corrupt later binary searches).
            if ( key < keys [ count - 1 ] ) {
                sorted = false ;
            }
        }
    }
    hasChanged = true ;
    keys [ count ] = key ;
    values [ count ] = value ;
    count ++ ;
    return true ;
}
Adds a pair into the table .
32,297
/**
 * Inserts a (key, value) pair at its sorted position, refusing the insert if
 * the current search target column already contains the target value.
 * Sorts the table first if it is not already sorted.
 *
 * @param key   the key to insert
 * @param value the value to insert
 * @return false if the table is full and fixed-size, or if a duplicate of
 *         the search target exists; true on successful insert
 */
public synchronized boolean addUnique ( int key , int value ) {
    if ( count == capacity ) {
        if ( fixedSize ) {
            return false ;
        }
        doubleCapacity ( ) ;
    }
    // Binary search below requires sorted data.
    if ( ! sorted ) {
        fastQuickSort ( ) ;
    }
    targetSearchValue = sortOnValues ? value : key ;
    int insertAt = binaryEmptySlotSearch ( ) ;
    if ( insertAt == - 1 ) {
        // Duplicate found in the search target column.
        return false ;
    }
    hasChanged = true ;
    // Shift the tail right to open the slot, unless appending at the end.
    if ( count != insertAt ) {
        moveRows ( insertAt , insertAt + 1 , count - insertAt ) ;
    }
    keys [ insertAt ] = key ;
    values [ insertAt ] = value ;
    count ++ ;
    return true ;
}
Adds a pair, ensuring that no duplicate of the current search target column (key or value) already exists.
32,298
/**
 * Returns the index of the lowest element equal to the current search
 * target, or -1 if no element matches.
 *
 * Keeps narrowing toward the left on an exact match so the FIRST
 * occurrence is found.
 *
 * @return index of the first matching element, or -1
 */
private int binaryFirstSearch ( ) {
    int low = 0 ;
    int high = count ;
    int found = count ;
    while ( low < high ) {
        // Overflow-safe midpoint: (low + high) / 2 can overflow for very
        // large tables; the unsigned shift cannot.
        int mid = ( low + high ) >>> 1 ;
        int compare = compare ( mid ) ;
        if ( compare < 0 ) {
            high = mid ;
        } else if ( compare > 0 ) {
            low = mid + 1 ;
        } else {
            // Match: remember it, but keep searching the left half for an
            // earlier occurrence.
            high = mid ;
            found = mid ;
        }
    }
    return found == count ? - 1 : found ;
}
Returns the index of the lowest element equal to the given search target, or -1 if none matches.
32,299
/**
 * Returns the index of the lowest element greater than the current search
 * target, or -1 if no such element exists.
 *
 * @return index of the first greater element, or -1
 */
private int binaryGreaterSearch ( ) {
    int low = 0 ;
    int high = count ;
    while ( low < high ) {
        // Overflow-safe midpoint: (low + high) / 2 can overflow for very
        // large tables; the unsigned shift cannot.
        int mid = ( low + high ) >>> 1 ;
        // compare(mid) < 0 means element at mid is greater than the target,
        // so the answer is at mid or to its left.
        if ( compare ( mid ) < 0 ) {
            high = mid ;
        } else {
            low = mid + 1 ;
        }
    }
    return low == count ? - 1 : low ;
}
Returns the index of the lowest element greater than the given search target, or -1 if none exists.