idx
int64
0
41.2k
question
stringlengths
83
4.15k
target
stringlengths
5
715
31,200
public ResultSet getCatalogs ( ) throws SQLException { checkClosed ( ) ; VoltTable result = new VoltTable ( new VoltTable . ColumnInfo ( "TABLE_CAT" , VoltType . STRING ) ) ; result . addRow ( new Object [ ] { catalogString } ) ; return new JDBC4ResultSet ( null , result ) ; }
Retrieves the catalog names available in this database .
31,201
public int getDatabaseMajorVersion ( ) throws SQLException { checkClosed ( ) ; System . out . println ( "\n\n\nVERSION: " + versionString ) ; return Integer . valueOf ( versionString . split ( "\\." ) [ 0 ] ) ; }
Retrieves the major version number of the underlying database .
31,202
public ResultSet getFunctions ( String catalog , String schemaPattern , String functionNamePattern ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Retrieves a description of the system and user functions available in the given catalog .
31,203
public ResultSet getPrimaryKeys ( String catalog , String schema , String table ) throws SQLException { assert ( table != null && ! table . isEmpty ( ) ) ; checkClosed ( ) ; this . sysCatalog . setString ( 1 , "PRIMARYKEYS" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vtable = res . getVoltTable ( ) . clone ( 0 ) ; while ( res . next ( ) ) { if ( res . getString ( "TABLE_NAME" ) . equals ( table ) ) { vtable . addRow ( res . getRowData ( ) ) ; } } return new JDBC4ResultSet ( sysCatalog , vtable ) ; }
Retrieves a description of the given table s primary key columns .
31,204
public ResultSet getSchemas ( ) throws SQLException { checkClosed ( ) ; VoltTable vtable = new VoltTable ( new ColumnInfo ( "TABLE_SCHEM" , VoltType . STRING ) , new ColumnInfo ( "TABLE_CATALOG" , VoltType . STRING ) ) ; JDBC4ResultSet res = new JDBC4ResultSet ( this . sysCatalog , vtable ) ; return res ; }
Retrieves the schema names available in this database .
31,205
public ResultSet getTablePrivileges ( String catalog , String schemaPattern , String tableNamePattern ) throws SQLException { checkClosed ( ) ; VoltTable vtable = new VoltTable ( new ColumnInfo ( "TABLE_CAT" , VoltType . STRING ) , new ColumnInfo ( "TABLE_SCHEM" , VoltType . STRING ) , new ColumnInfo ( "TABLE_NAME" , VoltType . STRING ) , new ColumnInfo ( "GRANTOR" , VoltType . STRING ) , new ColumnInfo ( "GRANTEE" , VoltType . STRING ) , new ColumnInfo ( "PRIVILEGE" , VoltType . STRING ) , new ColumnInfo ( "IS_GRANTABLE" , VoltType . STRING ) ) ; JDBC4ResultSet res = new JDBC4ResultSet ( this . sysCatalog , vtable ) ; return res ; }
Retrieves a description of the access rights for each table available in a catalog .
31,206
public static Pattern computeJavaPattern ( String sqlPattern ) { StringBuffer pattern_buff = new StringBuffer ( ) ; for ( int i = 0 ; i < sqlPattern . length ( ) ; i ++ ) { char c = sqlPattern . charAt ( i ) ; if ( c == '_' ) { pattern_buff . append ( '.' ) ; } else if ( c == '%' ) { pattern_buff . append ( ".*" ) ; } else pattern_buff . append ( c ) ; } return Pattern . compile ( pattern_buff . toString ( ) ) ; }
Convert the users VoltDB SQL pattern into a regex pattern
31,207
public ResultSet getTables ( String catalog , String schemaPattern , String tableNamePattern , String [ ] types ) throws SQLException { checkClosed ( ) ; this . sysCatalog . setString ( 1 , "TABLES" ) ; JDBC4ResultSet res = ( JDBC4ResultSet ) this . sysCatalog . executeQuery ( ) ; VoltTable vtable = res . getVoltTable ( ) . clone ( 0 ) ; List < String > typeStrings = null ; if ( types != null ) { typeStrings = Arrays . asList ( types ) ; } if ( tableNamePattern == null || tableNamePattern . length ( ) == 0 ) { tableNamePattern = "%" ; } Pattern table_pattern = computeJavaPattern ( tableNamePattern ) ; while ( res . next ( ) ) { if ( typeStrings == null || typeStrings . contains ( res . getString ( "TABLE_TYPE" ) ) ) { Matcher table_matcher = table_pattern . matcher ( res . getString ( "TABLE_NAME" ) ) ; if ( table_matcher . matches ( ) ) { vtable . addRow ( res . getRowData ( ) ) ; } } } return new JDBC4ResultSet ( this . sysCatalog , vtable ) ; }
Retrieves a description of the tables available in the given catalog .
31,208
public ResultSet getTableTypes ( ) throws SQLException { checkClosed ( ) ; VoltTable vtable = new VoltTable ( new ColumnInfo ( "TABLE_TYPE" , VoltType . STRING ) ) ; for ( String type : tableTypes ) { vtable . addRow ( type ) ; } JDBC4ResultSet res = new JDBC4ResultSet ( this . sysCatalog , vtable ) ; return res ; }
Retrieves the table types available in this database .
31,209
public ResultSet getTypeInfo ( ) throws SQLException { checkClosed ( ) ; this . sysCatalog . setString ( 1 , "TYPEINFO" ) ; ResultSet res = this . sysCatalog . executeQuery ( ) ; return res ; }
Retrieves a description of all the data types supported by this database .
31,210
public ResultSet getVersionColumns ( String catalog , String schema , String table ) throws SQLException { checkClosed ( ) ; throw SQLError . noSupport ( ) ; }
Retrieves a description of a table s columns that are automatically updated when any value in a row is updated .
31,211
public boolean supportsConvert ( int fromType , int toType ) throws SQLException { checkClosed ( ) ; switch ( fromType ) { case java . sql . Types . VARCHAR : case java . sql . Types . VARBINARY : case java . sql . Types . TIMESTAMP : case java . sql . Types . OTHER : switch ( toType ) { case java . sql . Types . VARCHAR : return true ; default : return false ; } case java . sql . Types . TINYINT : case java . sql . Types . SMALLINT : case java . sql . Types . INTEGER : case java . sql . Types . BIGINT : case java . sql . Types . FLOAT : case java . sql . Types . DECIMAL : switch ( toType ) { case java . sql . Types . VARCHAR : case java . sql . Types . TINYINT : case java . sql . Types . SMALLINT : case java . sql . Types . INTEGER : case java . sql . Types . BIGINT : case java . sql . Types . FLOAT : case java . sql . Types . DECIMAL : return true ; default : return false ; } default : return false ; } }
Retrieves whether this database supports the JDBC scalar function CONVERT for conversions between the JDBC types fromType and toType .
31,212
public boolean supportsResultSetType ( int type ) throws SQLException { checkClosed ( ) ; if ( type == ResultSet . TYPE_SCROLL_INSENSITIVE ) return true ; return false ; }
Retrieves whether this database supports the given result set type .
31,213
public static boolean isInProcessDatabaseType ( String url ) { if ( url == S_FILE || url == S_RES || url == S_MEM ) { return true ; } return false ; }
Returns true if type represents an in - process connection to database .
31,214
public T nextReady ( long systemCurrentTimeMillis ) { if ( delayed . size ( ) == 0 ) { return null ; } if ( delayed . firstKey ( ) > systemCurrentTimeMillis ) { return null ; } Entry < Long , Object [ ] > entry = delayed . pollFirstEntry ( ) ; Object [ ] values = entry . getValue ( ) ; @ SuppressWarnings ( "unchecked" ) T value = ( T ) values [ 0 ] ; if ( values . length > 1 ) { int prevLength = values . length ; values = Arrays . copyOfRange ( values , 1 , values . length ) ; assert ( values . length == prevLength - 1 ) ; delayed . put ( entry . getKey ( ) , values ) ; } m_size -- ; return value ; }
Return the next object that is safe for delivery or null if there are no safe objects to deliver .
31,215
private static byte [ ] readCatalog ( String catalogUrl ) throws IOException { assert ( catalogUrl != null ) ; final int MAX_CATALOG_SIZE = 40 * 1024 * 1024 ; InputStream fin = null ; try { URL url = new URL ( catalogUrl ) ; fin = url . openStream ( ) ; } catch ( MalformedURLException ex ) { fin = new FileInputStream ( catalogUrl ) ; } byte [ ] buffer = new byte [ MAX_CATALOG_SIZE ] ; int readBytes = 0 ; int totalBytes = 0 ; try { while ( readBytes >= 0 ) { totalBytes += readBytes ; readBytes = fin . read ( buffer , totalBytes , buffer . length - totalBytes - 1 ) ; } } finally { fin . close ( ) ; } return Arrays . copyOf ( buffer , totalBytes ) ; }
Read catalog bytes from URL
31,216
synchronized public void close ( ) { closed = true ; if ( sqw != null ) { try { if ( layoutHeaderChecked && layout != null && layout . getFooter ( ) != null ) { sendLayoutMessage ( layout . getFooter ( ) ) ; } sqw . close ( ) ; sqw = null ; } catch ( java . io . IOException ex ) { sqw = null ; } } }
Release any resources held by this SyslogAppender .
31,217
public static int getFacility ( String facilityName ) { if ( facilityName != null ) { facilityName = facilityName . trim ( ) ; } if ( "KERN" . equalsIgnoreCase ( facilityName ) ) { return LOG_KERN ; } else if ( "USER" . equalsIgnoreCase ( facilityName ) ) { return LOG_USER ; } else if ( "MAIL" . equalsIgnoreCase ( facilityName ) ) { return LOG_MAIL ; } else if ( "DAEMON" . equalsIgnoreCase ( facilityName ) ) { return LOG_DAEMON ; } else if ( "AUTH" . equalsIgnoreCase ( facilityName ) ) { return LOG_AUTH ; } else if ( "SYSLOG" . equalsIgnoreCase ( facilityName ) ) { return LOG_SYSLOG ; } else if ( "LPR" . equalsIgnoreCase ( facilityName ) ) { return LOG_LPR ; } else if ( "NEWS" . equalsIgnoreCase ( facilityName ) ) { return LOG_NEWS ; } else if ( "UUCP" . equalsIgnoreCase ( facilityName ) ) { return LOG_UUCP ; } else if ( "CRON" . equalsIgnoreCase ( facilityName ) ) { return LOG_CRON ; } else if ( "AUTHPRIV" . equalsIgnoreCase ( facilityName ) ) { return LOG_AUTHPRIV ; } else if ( "FTP" . equalsIgnoreCase ( facilityName ) ) { return LOG_FTP ; } else if ( "LOCAL0" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL0 ; } else if ( "LOCAL1" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL1 ; } else if ( "LOCAL2" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL2 ; } else if ( "LOCAL3" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL3 ; } else if ( "LOCAL4" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL4 ; } else if ( "LOCAL5" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL5 ; } else if ( "LOCAL6" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL6 ; } else if ( "LOCAL7" . equalsIgnoreCase ( facilityName ) ) { return LOG_LOCAL7 ; } else { return - 1 ; } }
Returns the integer value corresponding to the named syslog facility or - 1 if it couldn t be recognized .
31,218
public void activateOptions ( ) { if ( header ) { getLocalHostname ( ) ; } if ( layout != null && layout . getHeader ( ) != null ) { sendLayoutMessage ( layout . getHeader ( ) ) ; } layoutHeaderChecked = true ; }
This method returns immediately as options are activated when they are set .
31,219
private String getPacketHeader ( final long timeStamp ) { if ( header ) { StringBuffer buf = new StringBuffer ( dateFormat . format ( new Date ( timeStamp ) ) ) ; if ( buf . charAt ( 4 ) == '0' ) { buf . setCharAt ( 4 , ' ' ) ; } buf . append ( getLocalHostname ( ) ) ; buf . append ( ' ' ) ; return buf . toString ( ) ; } return "" ; }
Gets HEADER portion of packet .
31,220
private void sendLayoutMessage ( final String msg ) { if ( sqw != null ) { String packet = msg ; String hdr = getPacketHeader ( new Date ( ) . getTime ( ) ) ; if ( facilityPrinting || hdr . length ( ) > 0 ) { StringBuffer buf = new StringBuffer ( hdr ) ; if ( facilityPrinting ) { buf . append ( facilityStr ) ; } buf . append ( msg ) ; packet = buf . toString ( ) ; } sqw . setLevel ( 6 ) ; sqw . write ( packet ) ; } }
Set header or footer of layout .
31,221
protected byte [ ] getGZipData ( ) throws SQLException { byte [ ] bytes = gZipData ( ) ; if ( bytes != null ) { return bytes ; } if ( ( this . outputStream == null ) || ! this . outputStream . isClosed ( ) || this . outputStream . isFreed ( ) ) { throw Exceptions . notReadable ( ) ; } try { setGZipData ( this . outputStream . toByteArray ( ) ) ; return gZipData ( ) ; } catch ( IOException ex ) { throw Exceptions . notReadable ( ) ; } finally { this . freeOutputStream ( ) ; } }
Retrieves this object s SQLXML value as a gzipped array of bytes possibly by terminating any in - progress write operations and converting accumulated intermediate data .
31,222
protected synchronized void close ( ) { this . closed = true ; setReadable ( false ) ; setWritable ( false ) ; freeOutputStream ( ) ; freeInputStream ( ) ; this . gzdata = null ; }
closes this object and releases the resources that it holds .
31,223
protected < T extends Result > T createResult ( Class < T > resultClass ) throws SQLException { checkWritable ( ) ; setWritable ( false ) ; setReadable ( true ) ; if ( JAXBResult . class . isAssignableFrom ( resultClass ) ) { } else if ( ( resultClass == null ) || StreamResult . class . isAssignableFrom ( resultClass ) ) { return createStreamResult ( resultClass ) ; } else if ( DOMResult . class . isAssignableFrom ( resultClass ) ) { return createDOMResult ( resultClass ) ; } else if ( SAXResult . class . isAssignableFrom ( resultClass ) ) { return createSAXResult ( resultClass ) ; } else if ( StAXResult . class . isAssignableFrom ( resultClass ) ) { return createStAXResult ( resultClass ) ; } throw Util . invalidArgument ( "resultClass: " + resultClass ) ; }
Retrieves a new Result for setting the XML value designated by this SQLXML instance .
31,224
@ SuppressWarnings ( "unchecked" ) protected < T extends Result > T createSAXResult ( Class < T > resultClass ) throws SQLException { SAXResult result = null ; try { result = ( resultClass == null ) ? new SAXResult ( ) : ( SAXResult ) resultClass . newInstance ( ) ; } catch ( SecurityException ex ) { throw Exceptions . resultInstantiation ( ex ) ; } catch ( InstantiationException ex ) { throw Exceptions . resultInstantiation ( ex ) ; } catch ( IllegalAccessException ex ) { throw Exceptions . resultInstantiation ( ex ) ; } catch ( ClassCastException ex ) { throw Exceptions . resultInstantiation ( ex ) ; } StAXResult staxResult = createStAXResult ( null ) ; XMLStreamWriter xmlWriter = staxResult . getXMLStreamWriter ( ) ; SAX2XMLStreamWriter handler = new SAX2XMLStreamWriter ( xmlWriter ) ; result . setHandler ( handler ) ; return ( T ) result ; }
Retrieves a new SAXResult for setting the XML value designated by this SQLXML instance .
31,225
public List < AbstractExpression > bindingToIndexedExpression ( AbstractExpression expr ) { if ( equals ( expr ) ) { return s_reusableImmutableEmptyBinding ; } return null ; }
Otherwise there is no binding possible indicated by a null return .
31,226
public static Client getClient ( ClientConfig config , String [ ] servers , int port ) throws Exception { config . setTopologyChangeAware ( true ) ; final Client client = ClientFactory . createClient ( config ) ; for ( String server : servers ) { try { client . createConnection ( server . trim ( ) , port ) ; break ; } catch ( IOException e ) { } } if ( client . getConnectedHostList ( ) . isEmpty ( ) ) { throw new Exception ( "Unable to connect to any servers." ) ; } return client ; }
Get connection to servers in cluster .
31,227
public synchronized void addAdapter ( int pid , InternalClientResponseAdapter adapter ) { final ImmutableMap . Builder < Integer , InternalClientResponseAdapter > builder = ImmutableMap . builder ( ) ; builder . putAll ( m_adapters ) ; builder . put ( pid , adapter ) ; m_adapters = builder . build ( ) ; }
Synchronized in case multiple partitions are added concurrently .
31,228
public boolean hasTable ( String name ) { Table table = getCatalogContext ( ) . tables . get ( name ) ; return ( table != null ) ; }
Returns true if a table with the given name exists in the server catalog .
31,229
public boolean callProcedure ( InternalConnectionContext caller , Function < Integer , Boolean > backPressurePredicate , InternalConnectionStatsCollector statsCollector , ProcedureCallback procCallback , String proc , Object ... fieldList ) { Procedure catProc = InvocationDispatcher . getProcedureFromName ( proc , getCatalogContext ( ) ) ; if ( catProc == null ) { String fmt = "Cannot invoke procedure %s from streaming interface %s. Procedure not found." ; m_logger . rateLimitedLog ( SUPPRESS_INTERVAL , Level . ERROR , null , fmt , proc , caller ) ; m_failedCount . incrementAndGet ( ) ; return false ; } StoredProcedureInvocation task = new StoredProcedureInvocation ( ) ; task . setProcName ( proc ) ; task . setParams ( fieldList ) ; try { task = MiscUtils . roundTripForCL ( task ) ; } catch ( Exception e ) { String fmt = "Cannot invoke procedure %s from streaming interface %s. failed to create task." ; m_logger . rateLimitedLog ( SUPPRESS_INTERVAL , Level . ERROR , null , fmt , proc , caller ) ; m_failedCount . incrementAndGet ( ) ; return false ; } int [ ] partitions = null ; try { partitions = InvocationDispatcher . getPartitionsForProcedure ( catProc , task ) ; } catch ( Exception e ) { String fmt = "Can not invoke procedure %s from streaming interface %s. Partition not found." ; m_logger . rateLimitedLog ( SUPPRESS_INTERVAL , Level . ERROR , e , fmt , proc , caller ) ; m_failedCount . incrementAndGet ( ) ; return false ; } boolean mp = ( partitions [ 0 ] == MpInitiator . MP_INIT_PID ) || ( partitions . length > 1 ) ; final InternalClientResponseAdapter adapter = mp ? m_adapters . get ( MpInitiator . MP_INIT_PID ) : m_adapters . get ( partitions [ 0 ] ) ; InternalAdapterTaskAttributes kattrs = new InternalAdapterTaskAttributes ( caller , adapter . connectionId ( ) ) ; final AuthUser user = getCatalogContext ( ) . authSystem . getImporterUser ( ) ; if ( ! adapter . createTransaction ( kattrs , proc , catProc , procCallback , statsCollector , task , user , partitions , false , backPressurePredicate ) ) { m_failedCount . incrementAndGet ( ) ; return false ; } m_submitSuccessCount . incrementAndGet ( ) ; return true ; }
Use null backPressurePredicate for no back pressure
31,230
synchronized void registerService ( Promotable service ) { m_services . add ( service ) ; if ( m_isLeader ) { try { service . acceptPromotion ( ) ; } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( "Unable to promote global service." , true , e ) ; } } }
Add a service to be notified if this node becomes the global leader
31,231
void resolveTypesForCaseWhen ( Session session ) { if ( dataType != null ) { return ; } Expression expr = this ; while ( expr . opType == OpTypes . CASEWHEN ) { expr . nodes [ LEFT ] . resolveTypes ( session , expr ) ; if ( expr . nodes [ LEFT ] . isParam ) { expr . nodes [ LEFT ] . dataType = Type . SQL_BOOLEAN ; } expr . nodes [ RIGHT ] . nodes [ LEFT ] . resolveTypes ( session , nodes [ RIGHT ] ) ; expr . nodes [ RIGHT ] . nodes [ RIGHT ] . resolveTypes ( session , nodes [ RIGHT ] ) ; expr = expr . nodes [ RIGHT ] . nodes [ RIGHT ] ; } expr = this ; while ( expr . opType == OpTypes . CASEWHEN ) { dataType = Type . getAggregateType ( expr . nodes [ RIGHT ] . nodes [ LEFT ] . dataType , dataType ) ; dataType = Type . getAggregateType ( expr . nodes [ RIGHT ] . nodes [ RIGHT ] . dataType , dataType ) ; expr = expr . nodes [ RIGHT ] . nodes [ RIGHT ] ; } expr = this ; while ( expr . opType == OpTypes . CASEWHEN ) { if ( expr . nodes [ RIGHT ] . nodes [ LEFT ] . dataType == null ) { expr . nodes [ RIGHT ] . nodes [ LEFT ] . dataType = dataType ; } if ( expr . nodes [ RIGHT ] . nodes [ RIGHT ] . dataType == null ) { expr . nodes [ RIGHT ] . nodes [ RIGHT ] . dataType = dataType ; } if ( expr . nodes [ RIGHT ] . dataType == null ) { expr . nodes [ RIGHT ] . dataType = dataType ; } expr = expr . nodes [ RIGHT ] . nodes [ RIGHT ] ; } if ( dataType == null ) { throw Error . error ( ErrorCode . X_42567 ) ; } }
For CASE WHEN and its special cases section 9 . 3 of the SQL standard on type aggregation is implemented .
31,232
public static GeographyPointValue fromWKT ( String param ) { if ( param == null ) { throw new IllegalArgumentException ( "Null well known text argument to GeographyPointValue constructor." ) ; } Matcher m = wktPattern . matcher ( param ) ; if ( m . find ( ) ) { double longitude = toDouble ( m . group ( 1 ) , m . group ( 2 ) ) + 0.0 ; double latitude = toDouble ( m . group ( 3 ) , m . group ( 4 ) ) + 0.0 ; if ( Math . abs ( latitude ) > 90.0 ) { throw new IllegalArgumentException ( String . format ( "Latitude \"%f\" out of bounds." , latitude ) ) ; } if ( Math . abs ( longitude ) > 180.0 ) { throw new IllegalArgumentException ( String . format ( "Longitude \"%f\" out of bounds." , longitude ) ) ; } return new GeographyPointValue ( longitude , latitude ) ; } else { throw new IllegalArgumentException ( "Cannot construct GeographyPointValue value from \"" + param + "\"" ) ; } }
Create a GeographyPointValue from a well - known text string .
31,233
String formatLngLat ( ) { DecimalFormat df = new DecimalFormat ( "##0.0###########" ) ; double lng = ( Math . abs ( m_longitude ) < EPSILON ) ? 0 : m_longitude ; double lat = ( Math . abs ( m_latitude ) < EPSILON ) ? 0 : m_latitude ; return df . format ( lng ) + " " + df . format ( lat ) ; }
Format the coordinates for this point . Use 12 digits of precision after the decimal point .
31,234
public static GeographyPointValue unflattenFromBuffer ( ByteBuffer inBuffer , int offset ) { double lng = inBuffer . getDouble ( offset ) ; double lat = inBuffer . getDouble ( offset + BYTES_IN_A_COORD ) ; if ( lat == 360.0 && lng == 360.0 ) { return null ; } return new GeographyPointValue ( lng , lat ) ; }
Deserializes a point from a ByteBuffer at an absolute offset .
31,235
private static double normalize ( double v , double range ) { double a = v - Math . floor ( ( v + ( range / 2 ) ) / range ) * range ; if ( Math . abs ( a ) == 180.0 && ( a * v ) < 0 ) { a *= - 1 ; } return a + 0.0 ; }
by subtracting multiples of 360 .
31,236
public GeographyPointValue mul ( double alpha ) { return GeographyPointValue . normalizeLngLat ( getLongitude ( ) * alpha + 0.0 , getLatitude ( ) * alpha + 0.0 ) ; }
Return a point scaled by the given alpha value .
31,237
public GeographyPointValue rotate ( double phi , GeographyPointValue center ) { double sinphi = Math . sin ( 2 * Math . PI * phi / 360.0 ) ; double cosphi = Math . cos ( 2 * Math . PI * phi / 360.0 ) ; double longitude = getLongitude ( ) - center . getLongitude ( ) ; double latitude = getLatitude ( ) - center . getLatitude ( ) ; return GeographyPointValue . normalizeLngLat ( ( cosphi * longitude - sinphi * latitude ) + center . getLongitude ( ) , ( sinphi * longitude + cosphi * latitude ) + center . getLatitude ( ) ) ; }
Return a new point which is this point rotated by the angle phi around a given center point .
31,238
public static void createPersistentZKNodes ( ZooKeeper zk ) { LinkedList < ZKUtil . StringCallback > callbacks = new LinkedList < ZKUtil . StringCallback > ( ) ; for ( int i = 0 ; i < VoltZK . ZK_HIERARCHY . length ; i ++ ) { ZKUtil . StringCallback cb = new ZKUtil . StringCallback ( ) ; callbacks . add ( cb ) ; zk . create ( VoltZK . ZK_HIERARCHY [ i ] , null , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT , cb , null ) ; } for ( ZKUtil . StringCallback cb : callbacks ) { try { cb . get ( ) ; } catch ( org . apache . zookeeper_voltpatches . KeeperException . NodeExistsException e ) { } catch ( Exception e ) { VoltDB . crashLocalVoltDB ( e . getMessage ( ) , true , e ) ; } } }
Race to create the persistent nodes .
31,239
public static List < MailboxNodeContent > parseMailboxContents ( List < String > jsons ) throws JSONException { ArrayList < MailboxNodeContent > objects = new ArrayList < MailboxNodeContent > ( jsons . size ( ) ) ; for ( String json : jsons ) { MailboxNodeContent content = null ; JSONObject jsObj = new JSONObject ( json ) ; long HSId = jsObj . getLong ( "HSId" ) ; Integer partitionId = null ; if ( jsObj . has ( "partitionId" ) ) { partitionId = jsObj . getInt ( "partitionId" ) ; } content = new MailboxNodeContent ( HSId , partitionId ) ; objects . add ( content ) ; } return objects ; }
Helper method for parsing mailbox node contents into Java objects .
31,240
public static boolean createMigratePartitionLeaderInfo ( ZooKeeper zk , MigratePartitionLeaderInfo info ) { try { zk . create ( migrate_partition_leader_info , info . toBytes ( ) , Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; } catch ( KeeperException e ) { if ( e . code ( ) == KeeperException . Code . NODEEXISTS ) { try { zk . setData ( migrate_partition_leader_info , info . toBytes ( ) , - 1 ) ; } catch ( KeeperException | InterruptedException | JSONException e1 ) { } return false ; } org . voltdb . VoltDB . crashLocalVoltDB ( "Unable to create MigratePartitionLeader Indicator" , true , e ) ; } catch ( InterruptedException | JSONException e ) { org . voltdb . VoltDB . crashLocalVoltDB ( "Unable to create MigratePartitionLeader Indicator" , true , e ) ; } return true ; }
Save MigratePartitionLeader information for error handling
31,241
public static MigratePartitionLeaderInfo getMigratePartitionLeaderInfo ( ZooKeeper zk ) { try { byte [ ] data = zk . getData ( migrate_partition_leader_info , null , null ) ; if ( data != null ) { MigratePartitionLeaderInfo info = new MigratePartitionLeaderInfo ( data ) ; return info ; } } catch ( KeeperException | InterruptedException | JSONException e ) { } return null ; }
get MigratePartitionLeader information
31,242
private boolean convertDateTimeLiteral ( Session session , Expression a , Expression b ) { if ( a . dataType . isDateTimeType ( ) ) { } else if ( b . dataType . isDateTimeType ( ) ) { Expression c = a ; a = b ; b = c ; } else { return false ; } if ( a . dataType . isDateTimeTypeWithZone ( ) ) { return false ; } if ( b . opType == OpTypes . VALUE && b . dataType . isCharacterType ( ) ) { b . valueData = a . dataType . castToType ( session , b . valueData , b . dataType ) ; b . dataType = a . dataType ; return true ; } return false ; }
for compatibility convert a datetime character string to a datetime value for comparison
31,243
void distributeOr ( ) { if ( opType != OpTypes . OR ) { return ; } if ( nodes [ LEFT ] . opType == OpTypes . AND ) { opType = OpTypes . AND ; Expression temp = new ExpressionLogical ( OpTypes . OR , nodes [ LEFT ] . nodes [ RIGHT ] , nodes [ RIGHT ] ) ; nodes [ LEFT ] . opType = OpTypes . OR ; nodes [ LEFT ] . nodes [ RIGHT ] = nodes [ RIGHT ] ; nodes [ RIGHT ] = temp ; } else if ( nodes [ RIGHT ] . opType == OpTypes . AND ) { Expression temp = nodes [ LEFT ] ; nodes [ LEFT ] = nodes [ RIGHT ] ; nodes [ RIGHT ] = temp ; distributeOr ( ) ; return ; } ( ( ExpressionLogical ) nodes [ LEFT ] ) . distributeOr ( ) ; ( ( ExpressionLogical ) nodes [ RIGHT ] ) . distributeOr ( ) ; }
Converts an OR containing an AND to an AND
31,244
boolean isSimpleBound ( ) { if ( opType == OpTypes . IS_NULL ) { return true ; } if ( nodes [ RIGHT ] != null ) { if ( nodes [ RIGHT ] . opType == OpTypes . VALUE ) { return true ; } if ( nodes [ RIGHT ] . opType == OpTypes . SQL_FUNCTION ) { if ( ( ( FunctionSQL ) nodes [ RIGHT ] ) . isValueFunction ( ) ) { return true ; } } } return false ; }
Called only on comparison expressions after reordering which have a COLUMN left leaf
31,245
void swapCondition ( ) { int i = OpTypes . EQUAL ; switch ( opType ) { case OpTypes . GREATER_EQUAL : i = OpTypes . SMALLER_EQUAL ; break ; case OpTypes . SMALLER_EQUAL : i = OpTypes . GREATER_EQUAL ; break ; case OpTypes . SMALLER : i = OpTypes . GREATER ; break ; case OpTypes . GREATER : i = OpTypes . SMALLER ; break ; case OpTypes . NOT_DISTINCT : i = OpTypes . NOT_DISTINCT ; break ; case OpTypes . EQUAL : break ; default : throw Error . runtimeError ( ErrorCode . U_S0500 , "Expression.swapCondition" ) ; } opType = i ; Expression e = nodes [ LEFT ] ; nodes [ LEFT ] = nodes [ RIGHT ] ; nodes [ RIGHT ] = e ; }
Swap the condition with its complement
31,246
private boolean voltConvertBinaryIntegerLiteral ( Session session , Expression lhs , Expression rhs ) { Expression nonIntegralExpr ; int whichChild ; if ( lhs . dataType . isIntegralType ( ) ) { nonIntegralExpr = rhs ; whichChild = RIGHT ; } else if ( rhs . dataType . isIntegralType ( ) ) { nonIntegralExpr = lhs ; whichChild = LEFT ; } else { return false ; } return ExpressionValue . voltMutateToBigintType ( nonIntegralExpr , this , whichChild ) ; }
If one child is an integer and the other is a VARBINARY literal try to convert the literal to an integer .
31,247
public final void delete ( Row row ) { for ( int i = indexList . length - 1 ; i >= 0 ; i -- ) { indexList [ i ] . delete ( this , row ) ; } remove ( row . getPos ( ) ) ; }
Basic delete with no logging or referential checks .
31,248
public int compare ( final Object a , final Object b ) { final long awhen = ( ( Task ) ( a ) ) . getNextScheduled ( ) ; final long bwhen = ( ( Task ) ( b ) ) . getNextScheduled ( ) ; return ( awhen < bwhen ) ? - 1 : ( awhen == bwhen ) ? 0 : 1 ; }
Required to back the priority queue for scheduled tasks .
31,249
public Object scheduleAfter ( final long delay , final Runnable runnable ) throws IllegalArgumentException { if ( runnable == null ) { throw new IllegalArgumentException ( "runnable == null" ) ; } return this . addTask ( now ( ) + delay , runnable , 0 , false ) ; }
Causes the specified Runnable to be executed once in the background after the specified delay .
31,250
public Object scheduleAt ( final Date date , final Runnable runnable ) throws IllegalArgumentException { if ( date == null ) { throw new IllegalArgumentException ( "date == null" ) ; } else if ( runnable == null ) { throw new IllegalArgumentException ( "runnable == null" ) ; } return this . addTask ( date . getTime ( ) , runnable , 0 , false ) ; }
Causes the specified Runnable to be executed once in the background at the specified time .
31,251
public Object schedulePeriodicallyAt ( final Date date , final long period , final Runnable runnable , final boolean relative ) throws IllegalArgumentException { if ( date == null ) { throw new IllegalArgumentException ( "date == null" ) ; } else if ( period <= 0 ) { throw new IllegalArgumentException ( "period <= 0" ) ; } else if ( runnable == null ) { throw new IllegalArgumentException ( "runnable == null" ) ; } return addTask ( date . getTime ( ) , runnable , period , relative ) ; }
Causes the specified Runnable to be executed periodically in the background starting at the specified time .
31,252
public Object schedulePeriodicallyAfter ( final long delay , final long period , final Runnable runnable , final boolean relative ) throws IllegalArgumentException { if ( period <= 0 ) { throw new IllegalArgumentException ( "period <= 0" ) ; } else if ( runnable == null ) { throw new IllegalArgumentException ( "runnable == null" ) ; } return addTask ( now ( ) + delay , runnable , period , relative ) ; }
Causes the specified Runnable to be executed periodically in the background starting after the specified delay .
31,253
public synchronized void shutdownImmediately ( ) { if ( ! this . isShutdown ) { final Thread runner = this . taskRunnerThread ; this . isShutdown = true ; if ( runner != null && runner . isAlive ( ) ) { runner . interrupt ( ) ; } this . taskQueue . cancelAllTasks ( ) ; } }
Shuts down this timer immediately interrupting the wait state associated with the current head of the task queue or the wait state internal to the currently executing task if any such state is currently in effect .
31,254
public static boolean isFixedRate ( final Object task ) { if ( task instanceof Task ) { final Task ltask = ( Task ) task ; return ( ltask . relative && ltask . period > 0 ) ; } else { return false ; } }
Retrieves whether the specified argument references a task scheduled periodically using fixed rate scheduling .
31,255
public static boolean isFixedDelay ( final Object task ) { if ( task instanceof Task ) { final Task ltask = ( Task ) task ; return ( ! ltask . relative && ltask . period > 0 ) ; } else { return false ; } }
Retrieves whether the specified argument references a task scheduled periodically using fixed delay scheduling .
31,256
public static Date getLastScheduled ( Object task ) { if ( task instanceof Task ) { final Task ltask = ( Task ) task ; final long last = ltask . getLastScheduled ( ) ; return ( last == 0 ) ? null : new Date ( last ) ; } else { return null ; } }
Retrieves the last time the referenced task was executed as a Date object . If the task has never been executed null is returned .
31,257
public static Date getNextScheduled ( Object task ) { if ( task instanceof Task ) { final Task ltask = ( Task ) task ; final long next = ltask . isCancelled ( ) ? 0 : ltask . getNextScheduled ( ) ; return next == 0 ? null : new Date ( next ) ; } else { return null ; } }
Retrieves the next time the referenced task is due to be executed as a Date object . If the referenced task is cancelled null is returned .
31,258
protected Task addTask ( final long first , final Runnable runnable , final long period , boolean relative ) { if ( this . isShutdown ) { throw new IllegalStateException ( "shutdown" ) ; } final Task task = new Task ( first , runnable , period , relative ) ; this . taskQueue . addTask ( task ) ; this . restart ( ) ; return task ; }
Adds to the task queue a new Task object encapsulating the supplied Runnable and scheduling arguments .
31,259
protected Task nextTask ( ) { try { while ( ! this . isShutdown || Thread . interrupted ( ) ) { long now ; long next ; long wait ; Task task ; synchronized ( this . taskQueue ) { task = this . taskQueue . peekTask ( ) ; if ( task == null ) { break ; } now = System . currentTimeMillis ( ) ; next = task . next ; wait = ( next - now ) ; if ( wait > 0 ) { this . taskQueue . park ( wait ) ; continue ; } else { this . taskQueue . removeTask ( ) ; } } long period = task . period ; if ( period > 0 ) { if ( task . relative ) { final long late = ( now - next ) ; if ( late > period ) { period = 0 ; } else if ( late > 0 ) { period -= late ; } } task . updateSchedule ( now , now + period ) ; this . taskQueue . addTask ( task ) ; } return task ; } } catch ( InterruptedException e ) { } return null ; }
Retrieves the next task to execute or null if this timer is shutdown the current thread is interrupted or there are no queued tasks .
31,260
ExecutionEngine initializeEE ( ) { String hostname = CoreUtils . getHostnameOrAddress ( ) ; HashinatorConfig hashinatorConfig = TheHashinator . getCurrentConfig ( ) ; ExecutionEngine eeTemp = null ; Deployment deploy = m_context . cluster . getDeployment ( ) . get ( "deployment" ) ; final int defaultDrBufferSize = Integer . getInteger ( "DR_DEFAULT_BUFFER_SIZE" , 512 * 1024 ) ; int configuredTimeout = Integer . getInteger ( "MAX_EXPORT_BUFFER_FLUSH_INTERVAL" , 4 * 1000 ) ; final int exportFlushTimeout = configuredTimeout > 0 ? configuredTimeout : 4 * 1000 ; int tempTableMaxSize = deploy . getSystemsettings ( ) . get ( "systemsettings" ) . getTemptablemaxsize ( ) ; if ( System . getProperty ( "TEMP_TABLE_MAX_SIZE" ) != null ) { tempTableMaxSize = Integer . getInteger ( "TEMP_TABLE_MAX_SIZE" ) ; } try { if ( m_backend . isDefaultJNITarget ) { eeTemp = new ExecutionEngineJNI ( m_context . cluster . getRelativeIndex ( ) , m_siteId , m_partitionId , m_context . getNodeSettings ( ) . getLocalSitesCount ( ) , CoreUtils . getHostIdFromHSId ( m_siteId ) , hostname , m_context . cluster . getDrclusterid ( ) , defaultDrBufferSize , tempTableMaxSize , hashinatorConfig , m_isLowestSiteId , exportFlushTimeout ) ; } else if ( m_backend == BackendTarget . NATIVE_EE_SPY_JNI ) { Class < ? > spyClass = Class . forName ( "org.mockito.Mockito" ) ; Method spyMethod = spyClass . getDeclaredMethod ( "spy" , Object . class ) ; ExecutionEngine internalEE = new ExecutionEngineJNI ( m_context . cluster . getRelativeIndex ( ) , m_siteId , m_partitionId , m_context . getNodeSettings ( ) . getLocalSitesCount ( ) , CoreUtils . getHostIdFromHSId ( m_siteId ) , hostname , m_context . cluster . getDrclusterid ( ) , defaultDrBufferSize , tempTableMaxSize , hashinatorConfig , m_isLowestSiteId , exportFlushTimeout ) ; eeTemp = ( ExecutionEngine ) spyMethod . invoke ( null , internalEE ) ; } else if ( m_backend . isIPC ) { eeTemp = new ExecutionEngineIPC ( m_context . cluster . getRelativeIndex ( ) , m_siteId , m_partitionId , m_context . getNodeSettings ( ) . getLocalSitesCount ( ) , CoreUtils . getHostIdFromHSId ( m_siteId ) , hostname , m_context . cluster . getDrclusterid ( ) , defaultDrBufferSize , tempTableMaxSize , m_backend , VoltDB . instance ( ) . getConfig ( ) . m_ipcPort , hashinatorConfig , m_isLowestSiteId , exportFlushTimeout ) ; } else { throw new VoltAbortException ( String . format ( "Unexpected BackendTarget value %s" , m_backend ) ) ; } eeTemp . loadCatalog ( m_startupConfig . m_timestamp , m_startupConfig . m_serializedCatalog ) ; eeTemp . setBatchTimeout ( m_context . cluster . getDeployment ( ) . get ( "deployment" ) . getSystemsettings ( ) . get ( "systemsettings" ) . getQuerytimeout ( ) ) ; } catch ( final Exception ex ) { hostLog . l7dlog ( Level . FATAL , LogKeys . host_ExecutionSite_FailedConstruction . name ( ) , new Object [ ] { m_siteId , m_siteIndex } , ex ) ; VoltDB . crashLocalVoltDB ( ex . getMessage ( ) , true , ex ) ; } return eeTemp ; }
Create a native VoltDB execution engine
31,261
private static void handleUndoLog ( List < UndoAction > undoLog , boolean undo ) { if ( undoLog == null ) { return ; } if ( undo ) { undoLog = Lists . reverse ( undoLog ) ; } for ( UndoAction action : undoLog ) { if ( undo ) { action . undo ( ) ; } else { action . release ( ) ; } } if ( undo ) { undoLog . clear ( ) ; } }
Java level related stuffs that are also needed to roll back
31,262
public boolean updateCatalog ( String diffCmds , CatalogContext context , boolean requiresSnapshotIsolationboolean , boolean isMPI , long txnId , long uniqueId , long spHandle , boolean isReplay , boolean requireCatalogDiffCmdsApplyToEE , boolean requiresNewExportGeneration ) { CatalogContext oldContext = m_context ; m_context = context ; m_ee . setBatchTimeout ( m_context . cluster . getDeployment ( ) . get ( "deployment" ) . getSystemsettings ( ) . get ( "systemsettings" ) . getQuerytimeout ( ) ) ; m_loadedProcedures . loadProcedures ( m_context , isReplay ) ; m_ee . loadFunctions ( m_context ) ; if ( isMPI ) { return true ; } if ( requireCatalogDiffCmdsApplyToEE == false ) { hostLog . debug ( "Skipped applying diff commands on EE." ) ; return true ; } CatalogMap < Table > tables = m_context . catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) . getTables ( ) ; boolean DRCatalogChange = false ; for ( Table t : tables ) { if ( t . getIsdred ( ) ) { DRCatalogChange |= diffCmds . contains ( "tables#" + t . getTypeName ( ) ) ; if ( DRCatalogChange ) { break ; } } } if ( ! DRCatalogChange ) { CatalogMap < Table > oldTables = oldContext . catalog . getClusters ( ) . get ( "cluster" ) . getDatabases ( ) . get ( "database" ) . getTables ( ) ; for ( Table t : oldTables ) { if ( t . getIsdred ( ) ) { DRCatalogChange |= diffCmds . contains ( CatalogSerializer . getDeleteDiffStatement ( t , "tables" ) ) ; if ( DRCatalogChange ) { break ; } } } } if ( requiresSnapshotIsolationboolean && m_snapshotter . isEESnapshotting ( ) ) { hostLog . info ( String . format ( "Site %d performing schema change operation must block until snapshot is locally complete." , CoreUtils . getSiteIdFromHSId ( m_siteId ) ) ) ; try { m_snapshotter . completeSnapshotWork ( m_sysprocContext ) ; hostLog . info ( String . format ( "Site %d locally finished snapshot. Will update catalog now." , CoreUtils . getSiteIdFromHSId ( m_siteId ) ) ) ; } catch ( InterruptedException e ) { VoltDB . crashLocalVoltDB ( "Unexpected Interrupted Exception while finishing a snapshot for a catalog update." , true , e ) ; } } m_ee . updateCatalog ( m_context . m_genId , requiresNewExportGeneration , diffCmds ) ; if ( DRCatalogChange ) { final DRCatalogCommands catalogCommands = DRCatalogDiffEngine . serializeCatalogCommandsForDr ( m_context . catalog , - 1 ) ; generateDREvent ( EventType . CATALOG_UPDATE , txnId , uniqueId , m_lastCommittedSpHandle , spHandle , catalogCommands . commands . getBytes ( Charsets . UTF_8 ) ) ; } return true ; }
Update the catalog . If we re the MPI don t bother with the EE .
31,263
public boolean updateSettings ( CatalogContext context ) { m_context = context ; m_loadedProcedures . loadProcedures ( m_context ) ; m_ee . loadFunctions ( m_context ) ; return true ; }
Update the system settings
31,264
public long [ ] validatePartitioning ( long [ ] tableIds , byte [ ] hashinatorConfig ) { ByteBuffer paramBuffer = m_ee . getParamBufferForExecuteTask ( 4 + ( 8 * tableIds . length ) + 4 + hashinatorConfig . length ) ; paramBuffer . putInt ( tableIds . length ) ; for ( long tableId : tableIds ) { paramBuffer . putLong ( tableId ) ; } paramBuffer . put ( hashinatorConfig ) ; ByteBuffer resultBuffer = ByteBuffer . wrap ( m_ee . executeTask ( TaskType . VALIDATE_PARTITIONING , paramBuffer ) ) ; long mispartitionedRows [ ] = new long [ tableIds . length ] ; for ( int ii = 0 ; ii < tableIds . length ; ii ++ ) { mispartitionedRows [ ii ] = resultBuffer . getLong ( ) ; } return mispartitionedRows ; }
For the specified list of table ids return the number of mispartitioned rows using the provided hashinator config
31,265
public void generateDREvent ( EventType type , long txnId , long uniqueId , long lastCommittedSpHandle , long spHandle , byte [ ] payloads ) { m_ee . quiesce ( lastCommittedSpHandle ) ; ByteBuffer paramBuffer = m_ee . getParamBufferForExecuteTask ( 32 + 16 + payloads . length ) ; paramBuffer . putInt ( type . ordinal ( ) ) ; paramBuffer . putLong ( uniqueId ) ; paramBuffer . putLong ( lastCommittedSpHandle ) ; paramBuffer . putLong ( spHandle ) ; paramBuffer . putLong ( txnId ) ; paramBuffer . putLong ( getNextUndoToken ( m_currentTxnId ) ) ; paramBuffer . putInt ( payloads . length ) ; paramBuffer . put ( payloads ) ; m_ee . executeTask ( TaskType . GENERATE_DR_EVENT , paramBuffer ) ; }
Generate a in - stream DR event which pushes an event buffer to topend
31,266
public boolean areRepairLogsComplete ( ) { for ( Entry < Long , ReplicaRepairStruct > entry : m_replicaRepairStructs . entrySet ( ) ) { if ( ! entry . getValue ( ) . logsComplete ( ) ) { return false ; } } return true ; }
Have all survivors supplied a full repair log?
31,267
public void repairSurvivors ( ) { if ( this . m_promotionResult . isCancelled ( ) ) { repairLogger . debug ( m_whoami + "skipping repair message creation for cancelled Term." ) ; return ; } if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "received all repair logs and is repairing surviving replicas." ) ; } for ( Iv2RepairLogResponseMessage li : m_repairLogUnion ) { VoltMessage repairMsg = createRepairMessage ( li ) ; if ( repairLogger . isDebugEnabled ( ) ) { repairLogger . debug ( m_whoami + "repairing: " + CoreUtils . hsIdCollectionToString ( m_survivors ) + " with: " + TxnEgo . txnIdToString ( li . getTxnId ( ) ) + " " + repairMsg ) ; } if ( repairMsg != null ) { m_mailbox . repairReplicasWith ( m_survivors , repairMsg ) ; } } m_promotionResult . set ( new RepairResult ( m_maxSeenTxnId ) ) ; }
Send missed - messages to survivors . Exciting!
31,268
void addToRepairLog ( Iv2RepairLogResponseMessage msg ) { if ( msg . getPayload ( ) == null ) { return ; } if ( msg . getTxnId ( ) <= m_maxSeenCompleteTxnId ) { return ; } Iv2RepairLogResponseMessage prev = m_repairLogUnion . floor ( msg ) ; if ( prev != null && ( prev . getTxnId ( ) != msg . getTxnId ( ) ) ) { prev = null ; } if ( msg . getPayload ( ) instanceof CompleteTransactionMessage ) { m_repairLogUnion . removeIf ( ( p ) -> p . getTxnId ( ) <= msg . getTxnId ( ) ) ; m_repairLogUnion . add ( msg ) ; m_maxSeenCompleteTxnId = msg . getTxnId ( ) ; } else if ( prev == null ) { m_repairLogUnion . add ( msg ) ; } }
replace old messages with complete transaction messages .
31,269
static String getSchemaPath ( String projectFilePath , String path ) throws IOException { File file = null ; if ( path . contains ( ".jar!" ) ) { String ddlText = null ; ddlText = VoltCompilerUtils . readFileFromJarfile ( path ) ; file = VoltProjectBuilder . writeStringToTempFile ( ddlText ) ; } else { file = new File ( path ) ; } if ( ! file . isAbsolute ( ) ) { if ( projectFilePath != null ) { file = new File ( new File ( projectFilePath ) . getParent ( ) , path ) ; } else { file = new File ( path ) ; } } return file . getPath ( ) ; }
Get the path of a schema file optionally relative to a project . xml file s path .
31,270
public void loadFunctions ( CatalogContext catalogContext ) { final CatalogMap < Function > catalogFunctions = catalogContext . database . getFunctions ( ) ; for ( UserDefinedFunctionRunner runner : m_udfs . values ( ) ) { if ( catalogFunctions . get ( runner . m_functionName ) == null ) { FunctionForVoltDB . deregisterUserDefinedFunction ( runner . m_functionName ) ; } } ImmutableMap . Builder < Integer , UserDefinedFunctionRunner > builder = ImmutableMap . < Integer , UserDefinedFunctionRunner > builder ( ) ; for ( final Function catalogFunction : catalogFunctions ) { final String className = catalogFunction . getClassname ( ) ; Class < ? > funcClass = null ; try { funcClass = catalogContext . classForProcedureOrUDF ( className ) ; } catch ( final ClassNotFoundException e ) { if ( className . startsWith ( "org.voltdb." ) ) { String msg = String . format ( ORGVOLTDB_FUNCCNAME_ERROR_FMT , className ) ; VoltDB . crashLocalVoltDB ( msg , false , null ) ; } else { String msg = String . format ( UNABLETOLOAD_ERROR_FMT , className ) ; VoltDB . crashLocalVoltDB ( msg , false , null ) ; } } Object funcInstance = null ; try { funcInstance = funcClass . newInstance ( ) ; } catch ( InstantiationException | IllegalAccessException e ) { throw new RuntimeException ( String . format ( "Error instantiating function \"%s\"" , className ) , e ) ; } assert ( funcInstance != null ) ; builder . put ( catalogFunction . getFunctionid ( ) , new UserDefinedFunctionRunner ( catalogFunction , funcInstance ) ) ; } loadBuiltInJavaFunctions ( builder ) ; m_udfs = builder . build ( ) ; }
Load all the UDFs recorded in the catalog . Instantiate and register them in the system .
31,271
static String readFile ( String file ) { try { FileReader reader = new FileReader ( file ) ; BufferedReader read = new BufferedReader ( reader ) ; StringBuffer b = new StringBuffer ( ) ; String s = null ; int count = 0 ; while ( ( s = read . readLine ( ) ) != null ) { count ++ ; b . append ( s ) ; b . append ( '\n' ) ; } read . close ( ) ; reader . close ( ) ; return b . toString ( ) ; } catch ( IOException e ) { return e . getMessage ( ) ; } }
Redid this file to remove sizing requirements and to make it faster Speeded it up 10 fold .
31,272
static String [ ] getServersFromURL ( String url ) { String prefix = URL_PREFIX + "//" ; int end = url . length ( ) ; if ( url . indexOf ( "?" ) > 0 ) { end = url . indexOf ( "?" ) ; } String servstring = url . substring ( prefix . length ( ) , end ) ; return servstring . split ( "," ) ; }
Static so it s unit - testable yes lazy me
31,273
private void initializeGenerationFromDisk ( final CatalogMap < Connector > connectors , final ExportDataProcessor processor , File [ ] files , List < Pair < Integer , Integer > > localPartitionsToSites , long genId ) { List < Integer > onDiskPartitions = new ArrayList < Integer > ( ) ; NavigableSet < Table > streams = CatalogUtil . getExportTablesExcludeViewOnly ( connectors ) ; Set < String > exportedTables = new HashSet < > ( ) ; for ( Table stream : streams ) { exportedTables . add ( stream . getTypeName ( ) ) ; } Map < String , File > dataFiles = new HashMap < > ( ) ; for ( File data : files ) { if ( data . getName ( ) . endsWith ( ".pbd" ) ) { PbdSegmentName pbdName = PbdSegmentName . parseFile ( exportLog , data ) ; if ( pbdName . m_nonce != null ) { String nonce = pbdName . m_nonce ; String streamName = getStreamNameFromNonce ( nonce ) ; if ( exportedTables . contains ( streamName ) ) { dataFiles . put ( nonce , data ) ; } else { data . delete ( ) ; } } else if ( pbdName . m_result == Result . NOT_PBD ) { exportLog . warn ( data . getAbsolutePath ( ) + " is not a PBD file." ) ; } else if ( pbdName . m_result == Result . INVALID_NAME ) { exportLog . warn ( data . getAbsolutePath ( ) + " doesn't have valid PBD name." ) ; } } } for ( File ad : files ) { if ( ad . getName ( ) . endsWith ( ".ad" ) ) { String nonce = getNonceFromAdFile ( ad ) ; File dataFile = dataFiles . get ( nonce ) ; if ( dataFile != null ) { try { addDataSource ( ad , localPartitionsToSites , onDiskPartitions , processor , genId ) ; } catch ( IOException e ) { VoltDB . crashLocalVoltDB ( "Error intializing export datasource " + ad , true , e ) ; } } else { ad . delete ( ) ; } } } Set < Integer > allLocalPartitions = localPartitionsToSites . stream ( ) . map ( p -> p . getFirst ( ) ) . collect ( Collectors . toSet ( ) ) ; Set < Integer > onDIskPartitionsSet = new HashSet < Integer > ( onDiskPartitions ) ; onDIskPartitionsSet . removeAll ( allLocalPartitions ) ; if ( ! onDIskPartitionsSet . isEmpty ( ) ) { createAckMailboxesIfNeeded ( onDIskPartitionsSet ) ; } }
Initialize generation from disk creating data sources from the PBD files .
31,274
void initializeGenerationFromCatalog ( CatalogContext catalogContext , final CatalogMap < Connector > connectors , final ExportDataProcessor processor , int hostId , List < Pair < Integer , Integer > > localPartitionsToSites , boolean isCatalogUpdate ) { m_catalogVersion = catalogContext . catalogVersion ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Updating to catalog version : " + m_catalogVersion ) ; } Set < String > currentTables = new HashSet < > ( ) ; synchronized ( m_dataSourcesByPartition ) { for ( Iterator < Map < String , ExportDataSource > > it = m_dataSourcesByPartition . values ( ) . iterator ( ) ; it . hasNext ( ) ; ) { Map < String , ExportDataSource > sources = it . next ( ) ; currentTables . addAll ( sources . keySet ( ) ) ; } } if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Current tables: " + currentTables ) ; } Set < Integer > partitionsInUse = localPartitionsToSites . stream ( ) . map ( p -> p . getFirst ( ) ) . collect ( Collectors . toSet ( ) ) ; boolean createdSources = false ; NavigableSet < Table > streams = CatalogUtil . getExportTablesExcludeViewOnly ( connectors ) ; Set < String > exportedTables = new HashSet < > ( ) ; for ( Table stream : streams ) { addDataSources ( stream , hostId , localPartitionsToSites , partitionsInUse , processor , catalogContext . m_genId , isCatalogUpdate ) ; exportedTables . add ( stream . getTypeName ( ) ) ; createdSources = true ; } updateStreamStatus ( exportedTables ) ; for ( String table : exportedTables ) { currentTables . remove ( table ) ; } if ( ! currentTables . isEmpty ( ) ) { removeDataSources ( currentTables ) ; } createAckMailboxesIfNeeded ( createdSources ? partitionsInUse : new HashSet < Integer > ( ) ) ; }
Initialize generation from catalog .
31,275
private void updateStreamStatus ( Set < String > exportedTables ) { synchronized ( m_dataSourcesByPartition ) { for ( Iterator < Map < String , ExportDataSource > > it = m_dataSourcesByPartition . values ( ) . iterator ( ) ; it . hasNext ( ) ; ) { Map < String , ExportDataSource > sources = it . next ( ) ; for ( String tableName : sources . keySet ( ) ) { ExportDataSource src = sources . get ( tableName ) ; if ( ! exportedTables . contains ( tableName ) ) { src . setStatus ( ExportDataSource . StreamStatus . DROPPED ) ; } else if ( src . getStatus ( ) == ExportDataSource . StreamStatus . DROPPED ) { src . setStatus ( ExportDataSource . StreamStatus . ACTIVE ) ; } } } } }
Mark a DataSource as dropped if its not present in the connectors .
31,276
private void sendDummyTakeMastershipResponse ( long sourceHsid , long requestId , int partitionId , byte [ ] signatureBytes ) { int msgLen = 1 + 4 + 4 + signatureBytes . length + 8 ; ByteBuffer buf = ByteBuffer . allocate ( msgLen ) ; buf . put ( ExportManager . TAKE_MASTERSHIP_RESPONSE ) ; buf . putInt ( partitionId ) ; buf . putInt ( signatureBytes . length ) ; buf . put ( signatureBytes ) ; buf . putLong ( requestId ) ; BinaryPayloadMessage bpm = new BinaryPayloadMessage ( new byte [ 0 ] , buf . array ( ) ) ; m_mbox . send ( sourceHsid , bpm ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Partition " + partitionId + " mailbox hsid (" + CoreUtils . hsIdToString ( m_mbox . getHSId ( ) ) + ") send dummy TAKE_MASTERSHIP_RESPONSE message(" + requestId + ") to " + CoreUtils . hsIdToString ( sourceHsid ) ) ; } }
Auto reply a response when the requested stream is no longer exists
31,277
public void updateAckMailboxes ( int partition , Set < Long > newHSIds ) { ImmutableList < Long > replicaHSIds = m_replicasHSIds . get ( partition ) ; synchronized ( m_dataSourcesByPartition ) { Map < String , ExportDataSource > partitionMap = m_dataSourcesByPartition . get ( partition ) ; if ( partitionMap == null ) { return ; } for ( ExportDataSource eds : partitionMap . values ( ) ) { eds . updateAckMailboxes ( Pair . of ( m_mbox , replicaHSIds ) ) ; if ( newHSIds != null && ! newHSIds . isEmpty ( ) ) { eds . forwardAckToNewJoinedReplicas ( newHSIds ) ; eds . queryForBestCandidate ( ) ; } } } }
Access by multiple threads
31,278
private void addDataSources ( Table table , int hostId , List < Pair < Integer , Integer > > localPartitionsToSites , Set < Integer > partitionsInUse , final ExportDataProcessor processor , final long genId , boolean isCatalogUpdate ) { for ( Pair < Integer , Integer > partitionAndSiteId : localPartitionsToSites ) { int partition = partitionAndSiteId . getFirst ( ) ; int siteId = partitionAndSiteId . getSecond ( ) ; synchronized ( m_dataSourcesByPartition ) { try { Map < String , ExportDataSource > dataSourcesForPartition = m_dataSourcesByPartition . get ( partition ) ; if ( dataSourcesForPartition == null ) { dataSourcesForPartition = new HashMap < String , ExportDataSource > ( ) ; m_dataSourcesByPartition . put ( partition , dataSourcesForPartition ) ; } final String key = table . getTypeName ( ) ; if ( ! dataSourcesForPartition . containsKey ( key ) ) { ExportDataSource exportDataSource = new ExportDataSource ( this , processor , "database" , key , partition , siteId , genId , table . getColumns ( ) , table . getPartitioncolumn ( ) , m_directory . getPath ( ) ) ; int migrateBatchSize = CatalogUtil . getPersistentMigrateBatchSize ( key ) ; exportDataSource . setupMigrateRowsDeleter ( migrateBatchSize ) ; if ( exportLog . isDebugEnabled ( ) ) { exportLog . debug ( "Creating ExportDataSource for table in catalog " + key + " partition " + partition + " site " + siteId ) ; } dataSourcesForPartition . put ( key , exportDataSource ) ; if ( isCatalogUpdate ) { exportDataSource . updateCatalog ( table , genId ) ; } } else { ExportDataSource eds = dataSourcesForPartition . get ( key ) ; ExportClientBase client = processor . getExportClient ( key ) ; if ( client != null ) { eds . setClient ( client ) ; eds . setRunEveryWhere ( client . isRunEverywhere ( ) ) ; } else { eds . setClient ( null ) ; eds . setRunEveryWhere ( false ) ; } eds . markInCatalog ( partitionsInUse . contains ( partition ) ) ; if ( isCatalogUpdate ) { eds . updateCatalog ( table , genId ) ; } } } catch ( IOException e ) { VoltDB . crashLocalVoltDB ( "Error creating datasources for table " + table . getTypeName ( ) + " host id " + hostId , true , e ) ; } } } }
Add datasources for a catalog table in all partitions
31,279
public void onSourceDrained ( int partitionId , String tableName ) { ExportDataSource source ; synchronized ( m_dataSourcesByPartition ) { Map < String , ExportDataSource > sources = m_dataSourcesByPartition . get ( partitionId ) ; if ( sources == null ) { if ( ! m_removingPartitions . contains ( partitionId ) ) { exportLog . error ( "Could not find export data sources for partition " + partitionId + ". The export cleanup stream is being discarded." ) ; } return ; } source = sources . get ( tableName ) ; if ( source == null ) { exportLog . warn ( "Could not find export data source for signature " + partitionId + " name " + tableName + ". The export cleanup stream is being discarded." ) ; return ; } sources . remove ( tableName ) ; if ( sources . isEmpty ( ) ) { m_dataSourcesByPartition . remove ( partitionId ) ; removeMailbox ( partitionId ) ; } } exportLog . info ( "Drained on unused partition " + partitionId + ": " + source ) ; source . closeAndDelete ( ) ; }
The Export Data Source reports it is drained on an unused partition .
31,280
public void add ( int index , Object element ) { if ( index > elementCount ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + ">" + elementCount ) ; } if ( index < 0 ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " < 0" ) ; } if ( elementCount >= elementData . length ) { increaseCapacity ( ) ; } for ( int i = elementCount ; i > index ; i -- ) { elementData [ i ] = elementData [ i - 1 ] ; } elementData [ index ] = element ; elementCount ++ ; }
Inserts an element at the given index
31,281
public boolean add ( Object element ) { if ( elementCount >= elementData . length ) { increaseCapacity ( ) ; } elementData [ elementCount ] = element ; elementCount ++ ; return true ; }
Appends an element to the end of the list
31,282
public Object get ( int index ) { if ( index >= elementCount ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " >= " + elementCount ) ; } if ( index < 0 ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " < 0" ) ; } return elementData [ index ] ; }
Gets the element at given position
31,283
public Object remove ( int index ) { if ( index >= elementCount ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " >= " + elementCount ) ; } if ( index < 0 ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " < 0" ) ; } Object removedObj = elementData [ index ] ; for ( int i = index ; i < elementCount - 1 ; i ++ ) { elementData [ i ] = elementData [ i + 1 ] ; } elementCount -- ; if ( elementCount == 0 ) { clear ( ) ; } else { elementData [ elementCount ] = null ; } return removedObj ; }
Removes and returns the element at given position
31,284
public Object set ( int index , Object element ) { if ( index >= elementCount ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " >= " + elementCount ) ; } if ( index < 0 ) { throw new IndexOutOfBoundsException ( "Index out of bounds: " + index + " < 0" ) ; } Object replacedObj = elementData [ index ] ; elementData [ index ] = element ; return replacedObj ; }
Replaces the element at given position
31,285
public static boolean bufEquals ( byte onearray [ ] , byte twoarray [ ] ) { if ( onearray == twoarray ) return true ; boolean ret = ( onearray . length == twoarray . length ) ; if ( ! ret ) { return ret ; } for ( int idx = 0 ; idx < onearray . length ; idx ++ ) { if ( onearray [ idx ] != twoarray [ idx ] ) { return false ; } } return true ; }
equals function that actually compares two buffers .
31,286
public Connection getConnection ( String curDriverIn , String curCharsetIn , String curTrustStoreIn ) throws ClassNotFoundException , MalformedURLException , SQLException { String curDriver = curDriverIn ; String curCharset = curCharsetIn ; String curTrustStore = curTrustStoreIn ; Properties sysProps = System . getProperties ( ) ; if ( curDriver == null ) { curDriver = ( ( driver == null ) ? DEFAULT_JDBC_DRIVER : driver ) ; } if ( curCharset == null && charset != null ) { curCharset = charset ; } if ( curTrustStore == null && truststore != null ) { curTrustStore = truststore ; } if ( curCharset == null ) { sysProps . remove ( "sqlfile.charset" ) ; } else { sysProps . put ( "sqlfile.charset" , curCharset ) ; } if ( curTrustStore == null ) { sysProps . remove ( "javax.net.ssl.trustStore" ) ; } else { sysProps . put ( "javax.net.ssl.trustStore" , curTrustStore ) ; } String urlString = null ; try { urlString = expandSysPropVars ( url ) ; } catch ( IllegalArgumentException iae ) { throw new MalformedURLException ( iae . getMessage ( ) + " for URL '" + url + "'" ) ; } String userString = null ; if ( username != null ) try { userString = expandSysPropVars ( username ) ; } catch ( IllegalArgumentException iae ) { throw new MalformedURLException ( iae . getMessage ( ) + " for user name '" + username + "'" ) ; } String passwordString = null ; if ( password != null ) try { passwordString = expandSysPropVars ( password ) ; } catch ( IllegalArgumentException iae ) { throw new MalformedURLException ( iae . getMessage ( ) + " for password" ) ; } Class . forName ( curDriver ) ; Connection c = ( userString == null ) ? DriverManager . getConnection ( urlString ) : DriverManager . getConnection ( urlString , userString , passwordString ) ; if ( ti != null ) RCData . setTI ( c , ti ) ; return c ; }
Gets a JDBC Connection using the data of this RCData object with specified override elements
31,287
static public String tiToString ( int ti ) { switch ( ti ) { case Connection . TRANSACTION_READ_UNCOMMITTED : return "TRANSACTION_READ_UNCOMMITTED" ; case Connection . TRANSACTION_READ_COMMITTED : return "TRANSACTION_READ_COMMITTED" ; case Connection . TRANSACTION_REPEATABLE_READ : return "TRANSACTION_REPEATABLE_READ" ; case Connection . TRANSACTION_SERIALIZABLE : return "TRANSACTION_SERIALIZABLE" ; case Connection . TRANSACTION_NONE : return "TRANSACTION_NONE" ; } return "Custom Transaction Isolation numerical value: " + ti ; }
Return String for numerical java . sql . Connection Transaction level .
31,288
protected void handleJSONMessageAsDummy ( JSONObject obj ) throws Exception { hostLog . info ( "Generating dummy response for ops request " + obj ) ; sendOpsResponse ( null , obj , OPS_DUMMY ) ; }
For OPS actions generate a dummy response to the distributed work to avoid startup initialization dependencies . Startup can take a long time and we don t want to prevent other agents from making progress
31,289
public void performOpsAction ( final Connection c , final long clientHandle , final OpsSelector selector , final ParameterSet params ) throws Exception { m_es . submit ( new Runnable ( ) { public void run ( ) { try { collectStatsImpl ( c , clientHandle , selector , params ) ; } catch ( Exception e ) { hostLog . warn ( "Exception while attempting to collect stats" , e ) ; sendErrorResponse ( c , ClientResponse . OPERATIONAL_FAILURE , "Failed to get statistics (" + e . getMessage ( ) + ")." , clientHandle ) ; } } } ) ; }
Perform the action associated with this agent using the provided ParameterSet . This is the entry point to the OPS system .
31,290
protected void distributeOpsWork ( PendingOpsRequest newRequest , JSONObject obj ) throws Exception { if ( m_pendingRequests . size ( ) > MAX_IN_FLIGHT_REQUESTS ) { Iterator < Entry < Long , PendingOpsRequest > > iter = m_pendingRequests . entrySet ( ) . iterator ( ) ; final long now = System . currentTimeMillis ( ) ; boolean foundExpiredRequest = false ; while ( iter . hasNext ( ) ) { PendingOpsRequest por = iter . next ( ) . getValue ( ) ; if ( now - por . startTime > OPS_COLLECTION_TIMEOUT * 2 ) { iter . remove ( ) ; foundExpiredRequest = true ; } } if ( ! foundExpiredRequest ) { sendErrorResponse ( newRequest . c , ClientResponse . GRACEFUL_FAILURE , "Too many pending stat requests" , newRequest . clientData ) ; return ; } } final long requestId = m_nextRequestId ++ ; m_pendingRequests . put ( requestId , newRequest ) ; newRequest . timer = m_es . schedule ( new Runnable ( ) { public void run ( ) { checkForRequestTimeout ( requestId ) ; } } , OPS_COLLECTION_TIMEOUT , TimeUnit . MILLISECONDS ) ; obj . put ( "requestId" , requestId ) ; obj . put ( "returnAddress" , m_mailbox . getHSId ( ) ) ; int siteId = CoreUtils . getSiteIdFromHSId ( m_mailbox . getHSId ( ) ) ; byte payloadBytes [ ] = CompressionService . compressBytes ( obj . toString ( 4 ) . getBytes ( "UTF-8" ) ) ; for ( int hostId : m_messenger . getLiveHostIds ( ) ) { long agentHsId = CoreUtils . getHSIdFromHostAndSite ( hostId , siteId ) ; newRequest . expectedOpsResponses ++ ; BinaryPayloadMessage bpm = new BinaryPayloadMessage ( new byte [ ] { JSON_PAYLOAD } , payloadBytes ) ; m_mailbox . send ( agentHsId , bpm ) ; } }
For OPS actions which run on every node this method will distribute the necessary parameters to its peers on the other cluster nodes . Additionally it will pre - check for excessive outstanding requests and initialize the tracking and timeout of the new request . Subclasses of OpsAgent should use this when they need this service .
31,291
protected void sendClientResponse ( PendingOpsRequest request ) { byte statusCode = ClientResponse . SUCCESS ; String statusString = null ; VoltTable responseTables [ ] = request . aggregateTables ; if ( responseTables == null || responseTables . length == 0 ) { responseTables = new VoltTable [ 0 ] ; statusCode = ClientResponse . GRACEFUL_FAILURE ; statusString = "Requested info \"" + request . subselector + "\" is not yet available or not supported in the current configuration." ; } ClientResponseImpl response = new ClientResponseImpl ( statusCode , ClientResponse . UNINITIALIZED_APP_STATUS_CODE , null , responseTables , statusString ) ; response . setClientHandle ( request . clientData ) ; ByteBuffer buf = ByteBuffer . allocate ( response . getSerializedSize ( ) + 4 ) ; buf . putInt ( buf . capacity ( ) - 4 ) ; response . flattenToBuffer ( buf ) . flip ( ) ; request . c . writeStream ( ) . enqueue ( buf ) ; }
Send the final response stored in the PendingOpsRequest to the client which initiated the action . Will be called automagically after aggregating cluster - wide responses but may be called directly by subclasses if necessary .
31,292
private void sendOpsResponse ( VoltTable [ ] results , JSONObject obj , byte payloadType ) throws Exception { long requestId = obj . getLong ( "requestId" ) ; long returnAddress = obj . getLong ( "returnAddress" ) ; if ( results == null ) { ByteBuffer responseBuffer = ByteBuffer . allocate ( 8 ) ; responseBuffer . putLong ( requestId ) ; byte responseBytes [ ] = CompressionService . compressBytes ( responseBuffer . array ( ) ) ; BinaryPayloadMessage bpm = new BinaryPayloadMessage ( new byte [ ] { payloadType } , responseBytes ) ; m_mailbox . send ( returnAddress , bpm ) ; return ; } ByteBuffer [ ] bufs = new ByteBuffer [ results . length ] ; int statbytes = 0 ; for ( int i = 0 ; i < results . length ; i ++ ) { bufs [ i ] = results [ i ] . getBuffer ( ) ; bufs [ i ] . position ( 0 ) ; statbytes += bufs [ i ] . remaining ( ) ; } ByteBuffer responseBuffer = ByteBuffer . allocate ( 8 + 4 * results . length + + statbytes ) ; responseBuffer . putLong ( requestId ) ; for ( ByteBuffer buf : bufs ) { responseBuffer . putInt ( buf . remaining ( ) ) ; responseBuffer . put ( buf ) ; } byte responseBytes [ ] = CompressionService . compressBytes ( responseBuffer . array ( ) ) ; BinaryPayloadMessage bpm = new BinaryPayloadMessage ( new byte [ ] { payloadType } , responseBytes ) ; m_mailbox . send ( returnAddress , bpm ) ; }
Return the results of distributed work to the original requesting agent . Used by subclasses to respond after they ve done their local work .
31,293
private static void addUDFDependences ( Function function , Statement catalogStmt ) { Procedure procedure = ( Procedure ) catalogStmt . getParent ( ) ; addFunctionDependence ( function , procedure , catalogStmt ) ; addStatementDependence ( function , catalogStmt ) ; }
Add all statement dependences both ways .
31,294
private static void addFunctionDependence ( Function function , Procedure procedure , Statement catalogStmt ) { String funcDeps = function . getStmtdependers ( ) ; Set < String > stmtSet = new TreeSet < > ( ) ; for ( String stmtName : funcDeps . split ( "," ) ) { if ( ! stmtName . isEmpty ( ) ) { stmtSet . add ( stmtName ) ; } } String statementName = procedure . getTypeName ( ) + ":" + catalogStmt . getTypeName ( ) ; if ( stmtSet . contains ( statementName ) ) { return ; } stmtSet . add ( statementName ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "," ) ; for ( String stmtName : stmtSet ) { sb . append ( stmtName + "," ) ; } function . setStmtdependers ( sb . toString ( ) ) ; }
Add a dependence to a function of a statement . The function s dependence string is altered with this function .
31,295
private static void addStatementDependence ( Function function , Statement catalogStmt ) { String fnDeps = catalogStmt . getFunctiondependees ( ) ; Set < String > fnSet = new TreeSet < > ( ) ; for ( String fnName : fnDeps . split ( "," ) ) { if ( ! fnName . isEmpty ( ) ) { fnSet . add ( fnName ) ; } } String functionName = function . getTypeName ( ) ; if ( fnSet . contains ( functionName ) ) { return ; } fnSet . add ( functionName ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "," ) ; for ( String fnName : fnSet ) { sb . append ( fnName + "," ) ; } catalogStmt . setFunctiondependees ( sb . toString ( ) ) ; }
Add a dependence of a statement to a function . The statement s dependence string is altered with this function .
31,296
static boolean fragmentReferencesPersistentTable ( AbstractPlanNode node ) { if ( node == null ) return false ; if ( node instanceof AbstractScanPlanNode ) return true ; if ( node instanceof InsertPlanNode ) return true ; if ( node instanceof DeletePlanNode ) return true ; if ( node instanceof UpdatePlanNode ) return true ; for ( int i = 0 ; i < node . getChildCount ( ) ; i ++ ) { AbstractPlanNode child = node . getChild ( i ) ; if ( fragmentReferencesPersistentTable ( child ) ) return true ; } return false ; }
Check through a plan graph and return true if it ever touches a persistent table .
31,297
public static Procedure compileNibbleDeleteProcedure ( Table catTable , String procName , Column col , ComparisonOperation comp ) { Procedure newCatProc = addProcedure ( catTable , procName ) ; String countingQuery = genSelectSqlForNibbleDelete ( catTable , col , comp ) ; addStatement ( catTable , newCatProc , countingQuery , "0" ) ; String deleteQuery = genDeleteSqlForNibbleDelete ( catTable , col , comp ) ; addStatement ( catTable , newCatProc , deleteQuery , "1" ) ; String valueAtQuery = genValueAtOffsetSqlForNibbleDelete ( catTable , col , comp ) ; addStatement ( catTable , newCatProc , valueAtQuery , "2" ) ; return newCatProc ; }
Generate small deletion queries by using count - select - delete pattern .
31,298
public static Procedure compileMigrateProcedure ( Table table , String procName , Column column , ComparisonOperation comparison ) { Procedure proc = addProcedure ( table , procName ) ; StringBuilder sb = new StringBuilder ( ) ; sb . append ( "SELECT COUNT(*) FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating AND " + column . getName ( ) + " " + comparison . toString ( ) + " ?;" ) ; addStatement ( table , proc , sb . toString ( ) , "0" ) ; sb . setLength ( 0 ) ; sb . append ( "SELECT " + column . getName ( ) + " FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating ORDER BY " + column . getName ( ) ) ; if ( comparison == ComparisonOperation . LTE || comparison == ComparisonOperation . LT ) { sb . append ( " ASC OFFSET ? LIMIT 1;" ) ; } else { sb . append ( " DESC OFFSET ? LIMIT 1;" ) ; } addStatement ( table , proc , sb . toString ( ) , "1" ) ; sb . setLength ( 0 ) ; sb . append ( "MIGRATE FROM " + table . getTypeName ( ) ) ; sb . append ( " WHERE not migrating AND " + column . getName ( ) + " " + comparison . toString ( ) + " ?;" ) ; addStatement ( table , proc , sb . toString ( ) , "2" ) ; return proc ; }
Generate migrate queries by using count - select - migrate pattern .
31,299
public static < E > Collection < E > constrainedCollection ( Collection < E > collection , Constraint < ? super E > constraint ) { return new ConstrainedCollection < E > ( collection , constraint ) ; }
Returns a constrained view of the specified collection using the specified constraint . Any operations that add new elements to the collection will call the provided constraint . However this method does not verify that existing elements satisfy the constraint .