idx: int64 (values 0 … 41.2k)
question: string (lengths 74 … 4.21k)
target: string (lengths 5 … 888)
40,700
public void deleteTmpFile ( String name , JobID jobID ) { DeleteProcess dp = new DeleteProcess ( name , jobID , count . get ( new ImmutablePair ( jobID , name ) ) ) ; executorService . schedule ( dp , 5000L , TimeUnit . MILLISECONDS ) ; }
Leaves a 5-second delay before clearing the local file.
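A minimal, self-contained sketch of the delayed-cleanup pattern used above; the Runnable and the printed message are hypothetical stand-ins for DeleteProcess and the actual file deletion:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class DelayedCleanupDemo {
    public static void main(String[] args) {
        ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();
        // Stand-in for DeleteProcess: runs once, 5000 ms after being scheduled.
        Runnable deleteTask = new Runnable() {
            public void run() {
                System.out.println("deleting local tmp file now");
            }
        };
        executorService.schedule(deleteTask, 5000L, TimeUnit.MILLISECONDS);
        executorService.shutdown(); // already-scheduled delayed tasks still execute by default
    }
}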
40,701
public void configure ( Configuration parameters ) { super . configure ( parameters ) ; String delimString = parameters . getString ( RECORD_DELIMITER , null ) ; if ( delimString != null ) { String charsetName = parameters . getString ( RECORD_DELIMITER_ENCODING , null ) ; if ( charsetName == null ) { setDelimiter ( delimString ) ; } else { try { setDelimiter ( delimString , charsetName ) ; } catch ( UnsupportedCharsetException e ) { throw new IllegalArgumentException ( "The charset with the name '" + charsetName + "' is not supported on this TaskManager instance." , e ) ; } } } String samplesString = parameters . getString ( NUM_STATISTICS_SAMPLES , null ) ; if ( samplesString != null ) { try { setNumLineSamples ( Integer . parseInt ( samplesString ) ) ; } catch ( NumberFormatException e ) { if ( LOG . isWarnEnabled ( ) ) { LOG . warn ( "Invalid value for number of samples to take: " + samplesString + ". Skipping sampling." ) ; } setNumLineSamples ( 0 ) ; } } }
Configures this input format by reading the path to the file from the configuration and the string that defines the record delimiter.
40,702
@ SuppressWarnings ( "unchecked" ) static InstanceManager loadInstanceManager ( final String instanceManagerClassName ) { Class < ? extends InstanceManager > instanceManagerClass ; try { instanceManagerClass = ( Class < ? extends InstanceManager > ) Class . forName ( instanceManagerClassName ) ; } catch ( ClassNotFoundException e ) { LOG . error ( "Cannot find class " + instanceManagerClassName + ": " + StringUtils . stringifyException ( e ) ) ; return null ; } InstanceManager instanceManager ; try { instanceManager = instanceManagerClass . newInstance ( ) ; } catch ( InstantiationException e ) { LOG . error ( "Cannot create instanceManager: " + StringUtils . stringifyException ( e ) ) ; return null ; } catch ( IllegalAccessException e ) { LOG . error ( "Cannot create instanceManager: " + StringUtils . stringifyException ( e ) ) ; return null ; } return instanceManager ; }
Tries to locate a class with the given name and to instantiate an instance manager from it.
40,703
private void recycleBuffer ( MemorySegment buffer ) { synchronized ( this . buffers ) { if ( this . isDestroyed ) { this . globalBufferPool . returnBuffer ( buffer ) ; this . numRequestedBuffers -- ; } else { if ( this . numRequestedBuffers > this . numDesignatedBuffers ) { this . globalBufferPool . returnBuffer ( buffer ) ; this . numRequestedBuffers -- ; } else if ( ! this . listeners . isEmpty ( ) ) { Buffer availableBuffer = new Buffer ( buffer , buffer . size ( ) , this . recycler ) ; try { this . listeners . poll ( ) . bufferAvailable ( availableBuffer ) ; } catch ( Exception e ) { this . buffers . add ( buffer ) ; this . buffers . notify ( ) ; } } else { this . buffers . add ( buffer ) ; this . buffers . notify ( ) ; } } } }
Returns a buffer to the buffer pool and notifies listeners about the availability of a new buffer .
40,704
public int getDistance ( final NetworkNode networkNode ) { int steps = 0 ; NetworkNode tmp = this ; while ( tmp != null ) { final int distance = tmp . isPredecessorOrSelfOf ( networkNode ) ; if ( distance >= 0 ) { return ( steps + distance ) ; } tmp = tmp . getParentNode ( ) ; ++ steps ; } return Integer . MAX_VALUE ; }
Determines the distance to the given network node . The distance is determined as the number of internal network nodes that must be traversed in order to send a packet from one node to the other plus one .
40,705
public Record nextRecord ( Record record ) { try { resultSet . next ( ) ; ResultSetMetaData rsmd = resultSet . getMetaData ( ) ; int column_count = rsmd . getColumnCount ( ) ; record . setNumFields ( column_count ) ; for ( int pos = 0 ; pos < column_count ; pos ++ ) { int type = rsmd . getColumnType ( pos + 1 ) ; retrieveTypeAndFillRecord ( pos , type , record ) ; } return record ; } catch ( SQLException e ) { throw new IllegalArgumentException ( "Couldn't read data:\t" + e . getMessage ( ) ) ; } catch ( NotTransformableSQLFieldException e ) { throw new IllegalArgumentException ( "Couldn't read data because of unknown column sql-type:\t" + e . getMessage ( ) ) ; } catch ( NullPointerException e ) { throw new IllegalArgumentException ( "Couldn't access resultSet:\t" + e . getMessage ( ) ) ; } }
Stores the next resultSet row in a Record.
40,706
public < T extends Aggregator < ? > > T getIterationAggregator ( String name ) { return this . runtimeContext . < T > getIterationAggregator ( name ) ; }
Gets the iteration aggregator registered under the given name. The iteration aggregator combines all aggregates globally once per superstep and makes them available in the next superstep.
40,707
public < T extends Value > T getPreviousIterationAggregate ( String name ) { return this . runtimeContext . < T > getPreviousIterationAggregate ( name ) ; }
Get the aggregated value that an aggregator computed in the previous iteration .
40,708
public void writeBlock ( MemorySegment segment ) throws IOException { checkErroneous ( ) ; this . requestsNotReturned . incrementAndGet ( ) ; if ( this . closed || this . requestQueue . isClosed ( ) ) { this . requestsNotReturned . decrementAndGet ( ) ; throw new IOException ( "The writer has been closed." ) ; } this . requestQueue . add ( new SegmentWriteRequest ( this , segment ) ) ; }
Issues an asynchronous write request to the writer.
40,709
static Options getProgramSpecificOptions ( Options options ) { options . addOption ( JAR_OPTION ) ; options . addOption ( CLASS_OPTION ) ; options . addOption ( PARALLELISM_OPTION ) ; options . addOption ( ARGS_OPTION ) ; return options ; }
Gets the program options with the old (deprecated) flags for the jar file and arguments.
40,710
static Options getProgramSpecificOptionsWithoutDeprecatedOptions ( Options options ) { options . addOption ( CLASS_OPTION ) ; options . addOption ( PARALLELISM_OPTION ) ; return options ; }
Gets the program options without the old (deprecated) flags for the jar file and arguments.
40,711
static Options getRunOptions ( Options options ) { Options o = getProgramSpecificOptions ( options ) ; return getJobManagerAddressOption ( o ) ; }
Builds command line options for the run action .
40,712
static Options getInfoOptions ( Options options ) { options = getProgramSpecificOptions ( options ) ; options = getJobManagerAddressOption ( options ) ; options . addOption ( DESCR_OPTION ) ; options . addOption ( PLAN_OPTION ) ; return options ; }
Builds command line options for the info action .
40,713
static Options getListOptions ( Options options ) { options . addOption ( RUNNING_OPTION ) ; options . addOption ( SCHEDULED_OPTION ) ; options = getJobManagerAddressOption ( options ) ; return options ; }
Builds command line options for the list action .
40,714
static Options getCancelOptions ( Options options ) { options . addOption ( ID_OPTION ) ; options = getJobManagerAddressOption ( options ) ; return options ; }
Builds command line options for the cancel action .
40,715
protected int cancel ( String [ ] args ) { CommandLine line ; try { line = parser . parse ( CANCEL_OPTIONS , args , false ) ; } catch ( MissingOptionException e ) { System . out . println ( e . getMessage ( ) ) ; printHelpForCancel ( ) ; return 1 ; } catch ( UnrecognizedOptionException e ) { System . out . println ( e . getMessage ( ) ) ; printHelpForCancel ( ) ; return 2 ; } catch ( Exception e ) { return handleError ( e ) ; } if ( printHelp ) { printHelpForCancel ( ) ; return 0 ; } JobID jobId ; if ( line . hasOption ( ID_OPTION . getOpt ( ) ) ) { String jobIdString = line . getOptionValue ( ID_OPTION . getOpt ( ) ) ; try { jobId = new JobID ( StringUtils . hexStringToByte ( jobIdString ) ) ; } catch ( Exception e ) { System . out . println ( "Error: The value for the Job ID is not a valid ID." ) ; printHelpForCancel ( ) ; return 1 ; } } else { System . out . println ( "Error: Specify a Job ID to cancel a job." ) ; printHelpForCancel ( ) ; return 1 ; } ExtendedManagementProtocol jmConn = null ; try { jmConn = getJobManagerConnection ( line ) ; if ( jmConn == null ) { printHelpForCancel ( ) ; return 1 ; } jmConn . cancelJob ( jobId ) ; return 0 ; } catch ( Throwable t ) { return handleError ( t ) ; } finally { if ( jmConn != null ) { try { RPC . stopProxy ( jmConn ) ; } catch ( Throwable t ) { System . out . println ( "Warning: Could not cleanly shut down connection to the JobManager." ) ; } } jmConn = null ; } }
Executes the cancel action .
40,716
protected Configuration getGlobalConfiguration ( ) { if ( ! globalConfigurationLoaded ) { String location = getConfigurationDirectory ( ) ; GlobalConfiguration . loadConfiguration ( location ) ; globalConfigurationLoaded = true ; } return GlobalConfiguration . getConfiguration ( ) ; }
Reads the configuration settings. The default path can be overridden by setting the environment variable STRATOSPHERE_CONF_DIR.
40,717
private int handleError ( Throwable t ) { System . out . println ( "Error: " + t . getMessage ( ) ) ; if ( this . verbose ) { t . printStackTrace ( ) ; } else { System . out . println ( "For a more detailed error message use the verbose output option '-v'." ) ; } return 1 ; }
Displays exceptions .
40,718
public static void main ( String [ ] args ) throws ParseException { CliFrontend cli = new CliFrontend ( ) ; int retCode = cli . parseParameters ( args ) ; System . exit ( retCode ) ; }
Submits the job based on the given arguments.
40,719
public void registerInputOutput ( ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( formatLogString ( "Start registering input and output." ) ) ; } if ( this . userCodeClassLoader == null ) { try { this . userCodeClassLoader = LibraryCacheManager . getClassLoader ( getEnvironment ( ) . getJobID ( ) ) ; } catch ( IOException ioe ) { throw new RuntimeException ( "The ClassLoader for the user code could not be instantiated from the library cache." , ioe ) ; } } Configuration taskConf = getTaskConfiguration ( ) ; taskConf . setClassLoader ( this . userCodeClassLoader ) ; this . config = new TaskConfig ( taskConf ) ; final Class < ? extends PactDriver < S , OT > > driverClass = this . config . getDriver ( ) ; this . driver = InstantiationUtil . instantiate ( driverClass , PactDriver . class ) ; try { initInputReaders ( ) ; initBroadcastInputReaders ( ) ; } catch ( Exception e ) { throw new RuntimeException ( "Initializing the input streams failed" + ( e . getMessage ( ) == null ? "." : ": " + e . getMessage ( ) ) , e ) ; } try { initOutputs ( ) ; } catch ( Exception e ) { throw new RuntimeException ( "Initializing the output handlers failed" + ( e . getMessage ( ) == null ? "." : ": " + e . getMessage ( ) ) , e ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( formatLogString ( "Finished registering input and output." ) ) ; } }
Initialization method . Runs in the execution graph setup phase in the JobManager and as a setup method on the TaskManager .
40,720
protected static void reportAndClearAccumulators ( Environment env , Map < String , Accumulator < ? , ? > > accumulators , ArrayList < ChainedDriver < ? , ? > > chainedTasks ) { for ( ChainedDriver < ? , ? > chainedTask : chainedTasks ) { Map < String , Accumulator < ? , ? > > chainedAccumulators = chainedTask . getStub ( ) . getRuntimeContext ( ) . getAllAccumulators ( ) ; AccumulatorHelper . mergeInto ( accumulators , chainedAccumulators ) ; } if ( accumulators . size ( ) == 0 ) { return ; } synchronized ( env . getAccumulatorProtocolProxy ( ) ) { try { env . getAccumulatorProtocolProxy ( ) . reportAccumulatorResult ( new AccumulatorEvent ( env . getJobID ( ) , accumulators , true ) ) ; } catch ( IOException e ) { throw new RuntimeException ( "Communication with JobManager is broken. Could not send accumulators." , e ) ; } } AccumulatorHelper . resetAndClearAccumulators ( accumulators ) ; for ( ChainedDriver < ? , ? > chainedTask : chainedTasks ) { AccumulatorHelper . resetAndClearAccumulators ( chainedTask . getStub ( ) . getRuntimeContext ( ) . getAllAccumulators ( ) ) ; } }
This method is called at the end of a task. It receives the accumulators of the task and of the chained tasks, merges them into a single map of accumulators, and sends them to the JobManager.
40,721
protected void initInputsSerializersAndComparators ( int numInputs ) throws Exception { this . inputSerializers = new TypeSerializerFactory < ? > [ numInputs ] ; this . inputComparators = this . driver . requiresComparatorOnInput ( ) ? new TypeComparator [ numInputs ] : null ; this . inputIterators = new MutableObjectIterator [ numInputs ] ; for ( int i = 0 ; i < numInputs ; i ++ ) { final TypeSerializerFactory < ? > serializerFactory = this . config . getInputSerializer ( i , this . userCodeClassLoader ) ; this . inputSerializers [ i ] = serializerFactory ; if ( this . inputComparators != null ) { final TypeComparatorFactory < ? > comparatorFactory = this . config . getDriverComparator ( i , this . userCodeClassLoader ) ; this . inputComparators [ i ] = comparatorFactory . createComparator ( ) ; } this . inputIterators [ i ] = createInputIterator ( this . inputReaders [ i ] , this . inputSerializers [ i ] ) ; } }
Creates all the serializers and comparators .
40,722
public void setAllocatedResource ( final AllocatedResource resource ) { final Iterator < ExecutionVertex > it = this . vertices . iterator ( ) ; while ( it . hasNext ( ) ) { final ExecutionVertex vertex = it . next ( ) ; vertex . setAllocatedResource ( resource ) ; } }
Sets the given allocated resource for all vertices included in this pipeline .
40,723
public void updateExecutionState ( final ExecutionState executionState ) { final Iterator < ExecutionVertex > it = this . vertices . iterator ( ) ; while ( it . hasNext ( ) ) { final ExecutionVertex vertex = it . next ( ) ; vertex . updateExecutionState ( executionState ) ; } }
Updates the execution state for all vertices included in this pipeline .
40,724
public Thread getExecutingThread ( ) { synchronized ( this ) { if ( this . executingThread == null ) { if ( this . taskName == null ) { this . executingThread = new Thread ( this ) ; } else { this . executingThread = new Thread ( this , getTaskNameWithIndex ( ) ) ; } } return this . executingThread ; } }
Returns the thread which is assigned to execute the user code .
40,725
private void waitForInputChannelsToBeClosed ( ) throws IOException , InterruptedException { while ( true ) { if ( this . executionObserver . isCanceled ( ) ) { throw new InterruptedException ( ) ; } boolean allClosed = true ; for ( int i = 0 ; i < getNumberOfInputGates ( ) ; i ++ ) { final InputGate < ? extends IOReadableWritable > eig = this . inputGates . get ( i ) ; if ( ! eig . isClosed ( ) ) { allClosed = false ; } } if ( allClosed ) { break ; } else { Thread . sleep ( SLEEPINTERVAL ) ; } } }
Blocks until all input channels are closed .
40,726
private void closeInputGates ( ) throws IOException , InterruptedException { for ( int i = 0 ; i < this . inputGates . size ( ) ; i ++ ) { final InputGate < ? extends IOReadableWritable > eig = this . inputGates . get ( i ) ; eig . close ( ) ; } }
Closes all input gates which are not already closed .
40,727
private void requestAllOutputGatesToClose ( ) throws IOException , InterruptedException { for ( int i = 0 ; i < this . outputGates . size ( ) ; i ++ ) { this . outputGates . get ( i ) . requestClose ( ) ; } }
Requests all output gates to be closed .
40,728
public static void stopProxy ( VersionedProtocol proxy ) { if ( proxy != null ) { ( ( Invoker ) Proxy . getInvocationHandler ( proxy ) ) . close ( ) ; } }
Stops this proxy and releases its invoker's resources.
40,729
public static final String arrayToString ( Object array ) { if ( array == null ) { throw new NullPointerException ( ) ; } if ( array instanceof int [ ] ) { return Arrays . toString ( ( int [ ] ) array ) ; } if ( array instanceof long [ ] ) { return Arrays . toString ( ( long [ ] ) array ) ; } if ( array instanceof Object [ ] ) { return Arrays . toString ( ( Object [ ] ) array ) ; } if ( array instanceof byte [ ] ) { return Arrays . toString ( ( byte [ ] ) array ) ; } if ( array instanceof double [ ] ) { return Arrays . toString ( ( double [ ] ) array ) ; } if ( array instanceof float [ ] ) { return Arrays . toString ( ( float [ ] ) array ) ; } if ( array instanceof boolean [ ] ) { return Arrays . toString ( ( boolean [ ] ) array ) ; } if ( array instanceof char [ ] ) { return Arrays . toString ( ( char [ ] ) array ) ; } if ( array instanceof short [ ] ) { return Arrays . toString ( ( short [ ] ) array ) ; } if ( array . getClass ( ) . isArray ( ) ) { return "<unknown array type>" ; } else { throw new IllegalArgumentException ( "The given argument is no array." ) ; } }
Returns a string representation of the given array. The method takes an Object so that all types of primitive arrays can be handled as well.
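A short, self-contained illustration of the behavior above; since every branch delegates to Arrays.toString, calling that directly shows the exact output format (the demo class name is made up):

import java.util.Arrays;

public class ArrayToStringDemo {
    public static void main(String[] args) {
        // Same output the helper returns for primitive and Object arrays:
        System.out.println(Arrays.toString(new int[] { 1, 2, 3 }));     // [1, 2, 3]
        System.out.println(Arrays.toString(new String[] { "a", "b" })); // [a, b]
        // Per the guards above: a null argument yields a NullPointerException,
        // a non-array argument an IllegalArgumentException.
    }
}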
40,730
public LocalProperties filterByNodesConstantSet ( OptimizerNode node , int input ) { Ordering no = this . ordering ; FieldList ngf = this . groupedFields ; Set < FieldSet > nuf = this . uniqueFields ; if ( this . ordering != null ) { final FieldList involvedIndexes = this . ordering . getInvolvedIndexes ( ) ; for ( int i = 0 ; i < involvedIndexes . size ( ) ; i ++ ) { if ( ! node . isFieldConstant ( input , involvedIndexes . get ( i ) ) ) { if ( i == 0 ) { no = null ; ngf = null ; } else { no = this . ordering . createNewOrderingUpToIndex ( i ) ; ngf = no . getInvolvedIndexes ( ) ; } break ; } } } else if ( this . groupedFields != null ) { for ( Integer index : this . groupedFields ) { if ( ! node . isFieldConstant ( input , index ) ) { ngf = null ; } } } if ( this . uniqueFields != null ) { Set < FieldSet > s = new HashSet < FieldSet > ( this . uniqueFields ) ; for ( FieldSet fields : this . uniqueFields ) { for ( Integer index : fields ) { if ( ! node . isFieldConstant ( input , index ) ) { s . remove ( fields ) ; break ; } } } if ( s . size ( ) != this . uniqueFields . size ( ) ) { nuf = s ; } } return ( no == this . ordering && ngf == this . groupedFields && nuf == this . uniqueFields ) ? this : ( no == null && ngf == null && nuf == null ) ? new LocalProperties ( ) : new LocalProperties ( no , ngf , nuf ) ; }
Filters these properties by what can be preserved through a user function's constant-fields set.
40,731
public static synchronized ExecutionSignature createSignature ( final Class < ? extends AbstractInvokable > invokableClass , final JobID jobID ) { if ( messageDigest == null ) { try { messageDigest = MessageDigest . getInstance ( HASHINGALGORITHM ) ; } catch ( NoSuchAlgorithmException e ) { LOG . error ( "Unable to load message digest algorithm " + HASHINGALGORITHM ) ; return null ; } } messageDigest . reset ( ) ; messageDigest . update ( invokableClass . getName ( ) . getBytes ( ) ) ; String [ ] requiredJarFiles ; try { requiredJarFiles = LibraryCacheManager . getRequiredJarFiles ( jobID ) ; } catch ( IOException ioe ) { LOG . error ( "Cannot access library cache manager for job ID " + jobID ) ; return null ; } Arrays . sort ( requiredJarFiles ) ; for ( int i = 0 ; i < requiredJarFiles . length ; i ++ ) { messageDigest . update ( requiredJarFiles [ i ] . getBytes ( ) ) ; } return new ExecutionSignature ( messageDigest . digest ( ) ) ; }
Calculates the execution signature from the given class name and job ID .
40,732
public void open ( InputSplit ignored ) throws IOException { try { establishConnection ( ) ; statement = dbConn . createStatement ( ResultSet . TYPE_SCROLL_INSENSITIVE , ResultSet . CONCUR_READ_ONLY ) ; resultSet = statement . executeQuery ( query ) ; } catch ( SQLException se ) { close ( ) ; throw new IllegalArgumentException ( "open() failed." + se . getMessage ( ) , se ) ; } catch ( ClassNotFoundException cnfe ) { throw new IllegalArgumentException ( "JDBC-Class not found. - " + cnfe . getMessage ( ) , cnfe ) ; } }
Connects to the source database and executes the query .
40,733
public OUT nextRecord ( OUT tuple ) throws IOException { try { resultSet . next ( ) ; if ( columnTypes == null ) { extractTypes ( tuple ) ; } addValue ( tuple ) ; return tuple ; } catch ( SQLException se ) { close ( ) ; throw new IOException ( "Couldn't read data - " + se . getMessage ( ) , se ) ; } catch ( NullPointerException npe ) { close ( ) ; throw new IOException ( "Couldn't access resultSet" , npe ) ; } }
Stores the next resultSet row in a tuple.
40,734
public static org . apache . hadoop . conf . Configuration getHadoopConfiguration ( ) { Configuration retConf = new org . apache . hadoop . conf . Configuration ( ) ; final String hdfsDefaultPath = GlobalConfiguration . getString ( ConfigConstants . HDFS_DEFAULT_CONFIG , null ) ; if ( hdfsDefaultPath != null ) { retConf . addResource ( new org . apache . hadoop . fs . Path ( hdfsDefaultPath ) ) ; } else { LOG . debug ( "Cannot find hdfs-default configuration file" ) ; } final String hdfsSitePath = GlobalConfiguration . getString ( ConfigConstants . HDFS_SITE_CONFIG , null ) ; if ( hdfsSitePath != null ) { retConf . addResource ( new org . apache . hadoop . fs . Path ( hdfsSitePath ) ) ; } else { LOG . debug ( "Cannot find hdfs-site configuration file" ) ; } String [ ] possibleHadoopConfPaths = new String [ 4 ] ; possibleHadoopConfPaths [ 0 ] = GlobalConfiguration . getString ( ConfigConstants . PATH_HADOOP_CONFIG , null ) ; possibleHadoopConfPaths [ 1 ] = System . getenv ( "HADOOP_CONF_DIR" ) ; if ( System . getenv ( "HADOOP_HOME" ) != null ) { possibleHadoopConfPaths [ 2 ] = System . getenv ( "HADOOP_HOME" ) + "/conf" ; possibleHadoopConfPaths [ 3 ] = System . getenv ( "HADOOP_HOME" ) + "/etc/hadoop" ; } for ( int i = 0 ; i < possibleHadoopConfPaths . length ; i ++ ) { if ( possibleHadoopConfPaths [ i ] == null ) { continue ; } if ( new File ( possibleHadoopConfPaths [ i ] ) . exists ( ) ) { if ( new File ( possibleHadoopConfPaths [ i ] + "/core-site.xml" ) . exists ( ) ) { retConf . addResource ( new org . apache . hadoop . fs . Path ( possibleHadoopConfPaths [ i ] + "/core-site.xml" ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Adding " + possibleHadoopConfPaths [ i ] + "/core-site.xml to hadoop configuration" ) ; } } if ( new File ( possibleHadoopConfPaths [ i ] + "/hdfs-site.xml" ) . exists ( ) ) { retConf . addResource ( new org . apache . hadoop . fs . Path ( possibleHadoopConfPaths [ i ] + "/hdfs-site.xml" ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Adding " + possibleHadoopConfPaths [ i ] + "/hdfs-site.xml to hadoop configuration" ) ; } } } } return retConf ; }
Returns a new Hadoop Configuration object using the path to the hadoop conf configured in the Stratosphere configuration. This method is public because it is used in the HadoopDataSource.
40,735
public void readBlock ( MemorySegment segment ) throws IOException { checkErroneous ( ) ; this . requestsNotReturned . incrementAndGet ( ) ; if ( this . closed || this . requestQueue . isClosed ( ) ) { this . requestsNotReturned . decrementAndGet ( ) ; throw new IOException ( "The reader has been closed." ) ; } this . requestQueue . add ( new SegmentReadRequest ( this , segment ) ) ; }
Issues a read request which will asynchronously fill the given segment with the next block in the underlying file channel. Once the read request is fulfilled, the segment will be added to this reader's return queue.
40,736
public DataSource < String > readTextFile ( String filePath ) { Validate . notNull ( filePath , "The file path may not be null." ) ; return new DataSource < String > ( this , new TextInputFormat ( new Path ( filePath ) ) , BasicTypeInfo . STRING_TYPE_INFO ) ; }
Creates a DataSet that represents the Strings produced by reading the given file line-wise. The file will be read with the system's default character set.
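A minimal job skeleton using readTextFile, assuming the eu.stratosphere.api.java package layout of this codebase; the input path is illustrative:

import eu.stratosphere.api.java.DataSet;
import eu.stratosphere.api.java.ExecutionEnvironment;

public class ReadTextFileDemo {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Each line of the file becomes one String element of the data set:
        DataSet<String> lines = env.readTextFile("file:///tmp/input.txt");
        lines.print();
        env.execute("read-text-file demo");
    }
}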
40,737
public DataSource < Long > generateSequence ( long from , long to ) { return fromParallelCollection ( new NumberSequenceIterator ( from , to ) , BasicTypeInfo . LONG_TYPE_INFO ) ; }
Creates a new data set that contains a sequence of numbers. The data set will be created in parallel, so there is no guarantee about the order of the elements.
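A companion sketch for generateSequence under the same hypothetical setup as the previous example; note the elements arrive in no particular order because the source runs in parallel:

import eu.stratosphere.api.java.DataSet;
import eu.stratosphere.api.java.ExecutionEnvironment;

public class GenerateSequenceDemo {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // Produces the numbers 1..100 in parallel; element order is not guaranteed.
        DataSet<Long> numbers = env.generateSequence(1, 100);
        numbers.print();
        env.execute("generate-sequence demo");
    }
}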
40,738
public void configure ( Configuration config ) { super . configure ( config ) ; final String delim = config . getString ( RECORD_DELIMITER , "\n" ) ; final String charsetName = config . getString ( RECORD_DELIMITER_ENCODING , null ) ; if ( delim == null ) { throw new IllegalArgumentException ( "The delimiter in the DelimitedOutputFormat must not be null." ) ; } try { this . delimiter = charsetName == null ? delim . getBytes ( ) : delim . getBytes ( charsetName ) ; } catch ( UnsupportedEncodingException useex ) { throw new IllegalArgumentException ( "The charset with the name '" + charsetName + "' is not supported on this TaskManager instance." , useex ) ; } this . bufferSize = config . getInteger ( WRITE_BUFFER_SIZE , DEFAULT_WRITE_BUFFER_SIZE ) ; if ( this . bufferSize < MIN_WRITE_BUFFER_SIZE ) { throw new IllegalArgumentException ( "The write buffer size must not be less than " + MIN_WRITE_BUFFER_SIZE + " bytes." ) ; } }
Calls the superclass to configure itself and reads the config parameters for the delimiter and the write buffer size.
40,739
public void addBroadcastSetForMessagingFunction ( String name , DataSet < ? > data ) { this . bcVarsMessaging . add ( new Tuple2 < String , DataSet < ? > > ( name , data ) ) ; }
Adds a data set as a broadcast set to the messaging function .
40,740
public void addBroadcastSetForUpdateFunction ( String name , DataSet < ? > data ) { this . bcVarsUpdate . add ( new Tuple2 < String , DataSet < ? > > ( name , data ) ) ; }
Adds a data set as a broadcast set to the vertex update function .
40,741
public void setInput ( DataSet < Tuple2 < VertexKey , VertexValue > > inputData ) { TypeInformation < Tuple2 < VertexKey , VertexValue > > inputType = inputData . getType ( ) ; Validate . isTrue ( inputType . isTupleType ( ) && inputType . getArity ( ) == 2 , "The input data set (the initial vertices) must consist of 2-tuples." ) ; TypeInformation < VertexKey > keyType = ( ( TupleTypeInfo < ? > ) inputType ) . getTypeAt ( 0 ) ; TypeInformation < ? > edgeType = edgesWithoutValue != null ? edgesWithoutValue . getType ( ) : edgesWithValue . getType ( ) ; TypeInformation < VertexKey > edgeKeyType = ( ( TupleTypeInfo < ? > ) edgeType ) . getTypeAt ( 0 ) ; Validate . isTrue ( keyType . equals ( edgeKeyType ) , "The first tuple field (the vertex id) of the input data set (the initial vertices) " + "must be the same data type as the first fields of the edge data set (the source vertex id). " + "Here, the key type for the vertex ids is '%s' and the key type for the edges is '%s'." , keyType , edgeKeyType ) ; this . initialVertices = inputData ; }
Sets the input data set for this operator . In the case of this operator this input data set represents the set of vertices with their initial state .
40,742
public static final < VertexKey extends Comparable < VertexKey > , VertexValue , Message > VertexCentricIteration < VertexKey , VertexValue , Message , ? > withPlainEdges ( DataSet < Tuple2 < VertexKey , VertexKey > > edgesWithoutValue , VertexUpdateFunction < VertexKey , VertexValue , Message > vertexUpdateFunction , MessagingFunction < VertexKey , VertexValue , Message , ? > messagingFunction , int maximumNumberOfIterations ) { @ SuppressWarnings ( "unchecked" ) MessagingFunction < VertexKey , VertexValue , Message , Object > tmf = ( MessagingFunction < VertexKey , VertexValue , Message , Object > ) messagingFunction ; return new VertexCentricIteration < VertexKey , VertexValue , Message , Object > ( vertexUpdateFunction , tmf , edgesWithoutValue , maximumNumberOfIterations ) ; }
Creates a new vertex-centric iteration operator for graphs where the edges are not associated with a value.
40,743
public void processIncomingAccumulators ( JobID jobID , Map < String , Accumulator < ? , ? > > newAccumulators ) { synchronized ( this . jobAccumulators ) { JobAccumulators jobAccumulators = this . jobAccumulators . get ( jobID ) ; if ( jobAccumulators == null ) { jobAccumulators = new JobAccumulators ( ) ; this . jobAccumulators . put ( jobID , jobAccumulators ) ; cleanup ( jobID ) ; } jobAccumulators . processNew ( newAccumulators ) ; } }
Merges the new accumulators with the existing accumulators collected for the job .
40,744
public Map < String , Accumulator < ? , ? > > getJobAccumulators ( JobID jobID ) { JobAccumulators jobAccumulators = this . jobAccumulators . get ( jobID ) ; if ( jobAccumulators == null ) { return new HashMap < String , Accumulator < ? , ? > > ( ) ; } return jobAccumulators . getAccumulators ( ) ; }
Returns all collected accumulators for the job. For efficiency, the internal accumulator map is returned, so please use it read-only.
40,745
private void cleanup ( JobID jobId ) { if ( ! lru . contains ( jobId ) ) { lru . addFirst ( jobId ) ; } if ( lru . size ( ) > this . maxEntries ) { JobID toRemove = lru . removeLast ( ) ; this . jobAccumulators . remove ( toRemove ) ; } }
Cleans up the data for the oldest jobs if the maximum number of entries is reached.
40,746
public static void main ( String [ ] args ) { try { String configDir = null ; if ( args . length >= 2 && args [ 0 ] . equals ( "-configDir" ) ) { configDir = args [ 1 ] ; } if ( configDir == null ) { System . err . println ( "Error: Configuration directory must be specified.\nWebFrontend -configDir <directory>\n" ) ; System . exit ( 1 ) ; return ; } GlobalConfiguration . loadConfiguration ( configDir ) ; Configuration config = GlobalConfiguration . getConfiguration ( ) ; config . setString ( ConfigConstants . STRATOSPHERE_BASE_DIR_PATH_KEY , configDir + "/.." ) ; int port = config . getInteger ( ConfigConstants . WEB_FRONTEND_PORT_KEY , ConfigConstants . DEFAULT_WEBCLIENT_PORT ) ; WebInterfaceServer server = new WebInterfaceServer ( config , port ) ; LOG . info ( "Starting web frontend server on port " + port + '.' ) ; server . start ( ) ; server . join ( ) ; } catch ( Throwable t ) { LOG . error ( "Unexpected exception: " + t . getMessage ( ) , t ) ; } }
Main method. Accepts a single parameter, which is the config directory.
40,747
private long getLongInternal ( final String key , final long defaultValue ) { long retVal = defaultValue ; try { synchronized ( this . confData ) { if ( this . confData . containsKey ( key ) ) { retVal = Long . parseLong ( this . confData . get ( key ) ) ; } } } catch ( NumberFormatException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( StringUtils . stringifyException ( e ) ) ; } } return retVal ; }
Returns the value associated with the given key as a long integer .
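A short sketch of the fallback semantics, assuming the usual public static getLong wrapper around this internal method (key names and defaults are illustrative):

// Hypothetical: values that fail Long.parseLong silently fall back to the default.
// GlobalConfiguration.loadConfiguration(configDir) has been called beforehand.
long retries = GlobalConfiguration.getLong("taskmanager.setup.retries", 3L);
// stored value "three" -> NumberFormatException is logged at debug level, 3L is returned
// key not present      -> 3L is returned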
40,748
private Configuration getConfigurationInternal ( final String [ ] keys ) { Configuration conf = new Configuration ( ) ; synchronized ( this . confData ) { final Iterator < String > it = this . confData . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final String key = it . next ( ) ; boolean found = false ; if ( keys != null ) { for ( int i = 0 ; i < keys . length ; i ++ ) { if ( key . equals ( keys [ i ] ) ) { found = true ; break ; } } if ( found ) { conf . setString ( key , this . confData . get ( key ) ) ; } } else { conf . setString ( key , this . confData . get ( key ) ) ; } } } return conf ; }
Internal non-static method to return the configuration.
40,749
private void includeConfigurationInternal ( final Configuration conf ) { if ( conf == null ) { LOG . error ( "Given configuration object is null, ignoring it..." ) ; return ; } synchronized ( this . confData ) { final Iterator < String > it = conf . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final String key = it . next ( ) ; this . confData . put ( key , conf . getString ( key , "" ) ) ; } } }
Internal non-static method to include a configuration.
40,750
public void setRangePartitioned ( Ordering partitionOrdering , DataDistribution distribution ) { if ( partitionOrdering . getNumberOfFields ( ) != distribution . getNumberOfFields ( ) ) { throw new IllegalArgumentException ( "The number of keys in the distribution must match number of ordered fields." ) ; } this . partitionOrdering = partitionOrdering ; this . distribution = distribution ; }
Sets the sink to partition the records into ranges over the given ordering . The bucket boundaries are determined using the given data distribution .
40,751
public void setMinimumNumberOfInstances ( final InstanceType instanceType , final int number ) { this . minimumMap . put ( instanceType , Integer . valueOf ( number ) ) ; }
Sets the minimum number of instances to be requested from the given instance type .
40,752
public void setMaximumNumberOfInstances ( final InstanceType instanceType , final int number ) { this . maximumMap . put ( instanceType , Integer . valueOf ( number ) ) ; }
Sets the maximum number of instances to be requested from the given instance type .
40,753
public void setNumberOfInstances ( final InstanceType instanceType , final int number ) { setMinimumNumberOfInstances ( instanceType , number ) ; setMaximumNumberOfInstances ( instanceType , number ) ; }
Sets both the minimum and the maximum number of instances to be requested from the given instance type .
40,754
public int getMinimumNumberOfInstances ( final InstanceType instanceType ) { final Integer val = this . minimumMap . get ( instanceType ) ; if ( val != null ) { return val . intValue ( ) ; } return 0 ; }
Returns the minimum number of instances to be requested from the given instance type .
40,755
public int getMaximumNumberOfInstances ( final InstanceType instanceType ) { final Integer val = this . maximumMap . get ( instanceType ) ; if ( val != null ) { return val . intValue ( ) ; } return 0 ; }
Returns the maximum number of instances to be requested from the given instance type .
40,756
public RequestedGlobalProperties filterByNodesConstantSet ( OptimizerNode node , int input ) { if ( this . ordering != null ) { for ( int col : this . ordering . getInvolvedIndexes ( ) ) { if ( ! node . isFieldConstant ( input , col ) ) { return null ; } } } else if ( this . partitioningFields != null ) { for ( int colIndex : this . partitioningFields ) { if ( ! node . isFieldConstant ( input , colIndex ) ) { return null ; } } } if ( this . partitioning == PartitioningProperty . FULL_REPLICATION ) { return null ; } return this ; }
Filters these properties by what can be preserved by the given node when propagated down to the given input .
40,757
public void parameterizeChannel ( Channel channel , boolean globalDopChange , boolean localDopChange ) { if ( isTrivial ( ) ) { channel . setShipStrategy ( globalDopChange ? ShipStrategyType . PARTITION_RANDOM : ShipStrategyType . FORWARD ) ; return ; } final GlobalProperties inGlobals = channel . getSource ( ) . getGlobalProperties ( ) ; if ( ! globalDopChange && isMetBy ( inGlobals ) ) { if ( localDopChange ) { if ( inGlobals . getPartitioning ( ) == PartitioningProperty . HASH_PARTITIONED ) { channel . setShipStrategy ( ShipStrategyType . PARTITION_LOCAL_HASH , inGlobals . getPartitioningFields ( ) ) ; return ; } } else { channel . setShipStrategy ( ShipStrategyType . FORWARD ) ; return ; } } switch ( this . partitioning ) { case FULL_REPLICATION : channel . setShipStrategy ( ShipStrategyType . BROADCAST ) ; break ; case ANY_PARTITIONING : case HASH_PARTITIONED : channel . setShipStrategy ( ShipStrategyType . PARTITION_HASH , Utils . createOrderedFromSet ( this . partitioningFields ) ) ; break ; case RANGE_PARTITIONED : channel . setShipStrategy ( ShipStrategyType . PARTITION_RANGE , this . ordering . getInvolvedIndexes ( ) , this . ordering . getFieldSortDirections ( ) ) ; if ( this . dataDistribution != null ) { channel . setDataDistribution ( this . dataDistribution ) ; } break ; default : throw new CompilerException ( ) ; } }
Parameterizes the ship strategy fields of a channel such that the channel produces the desired global properties .
40,758
static String convertScanToString ( Scan scan ) throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream ( ) ; DataOutputStream dos = new DataOutputStream ( out ) ; scan . write ( dos ) ; return Base64 . encodeBytes ( out . toByteArray ( ) ) ; }
Writes the given scan into a Base64 encoded string .
40,759
public static Scan convertStringToScan ( String base64 ) throws IOException { ByteArrayInputStream bis = new ByteArrayInputStream ( Base64 . decode ( base64 ) ) ; DataInputStream dis = new DataInputStream ( bis ) ; Scan scan = new Scan ( ) ; scan . readFields ( dis ) ; return scan ; }
Converts the given Base64 string back into a Scan instance .
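A brief round-trip sketch with the two helpers above, assuming HBase's Scan is on the classpath; the column family is illustrative:

import org.apache.hadoop.hbase.client.Scan;

// Hypothetical round trip: serialize a Scan for the job configuration, then restore it.
Scan scan = new Scan();
scan.addFamily("cf".getBytes());
String encoded = convertScanToString(scan);   // Base64 of the serialized Scan
Scan restored = convertStringToScan(encoded); // reconstructs an equivalent Scan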
40,760
private int incrementReferenceCounter ( final JobID jobID ) { while ( true ) { AtomicInteger ai = this . libraryReferenceCounter . get ( jobID ) ; if ( ai == null ) { ai = new AtomicInteger ( 1 ) ; if ( this . libraryReferenceCounter . putIfAbsent ( jobID , ai ) == null ) { return 1 ; } } else { return ai . incrementAndGet ( ) ; } } }
Increments the reference counter for the library manager entry with the given job ID .
40,761
private int decrementReferenceCounter ( final JobID jobID ) { final AtomicInteger ai = this . libraryReferenceCounter . get ( jobID ) ; if ( ai == null ) { throw new IllegalStateException ( "Cannot find reference counter entry for job " + jobID ) ; } int retVal = ai . decrementAndGet ( ) ; if ( retVal == 0 ) { this . libraryReferenceCounter . remove ( jobID ) ; } return retVal ; }
Decrements the reference counter for the library manager entry with the given job ID .
40,762
public static void register ( final JobID id , final String [ ] requiredJarFiles ) throws IOException { final LibraryCacheManager lib = get ( ) ; lib . registerInternal ( id , requiredJarFiles ) ; }
Registers a job ID with a set of library paths that are required to run the job. For every registered job the library cache manager creates a class loader that is used to instantiate the job's environment later on.
40,763
private void registerInternal ( final JobID id , final String [ ] requiredJarFiles ) throws IOException { while ( this . lockMap . putIfAbsent ( id , LOCK_OBJECT ) != null ) ; try { if ( incrementReferenceCounter ( id ) > 1 ) { return ; } if ( this . libraryManagerEntries . containsKey ( id ) ) { throw new IllegalStateException ( "Library cache manager already contains entry for job ID " + id ) ; } URL [ ] urls = null ; if ( requiredJarFiles != null ) { urls = new URL [ requiredJarFiles . length ] ; for ( int i = 0 ; i < requiredJarFiles . length ; i ++ ) { final Path p = contains ( requiredJarFiles [ i ] ) ; if ( p == null ) { throw new IOException ( requiredJarFiles [ i ] + " does not exist in the library cache" ) ; } try { urls [ i ] = p . toUri ( ) . toURL ( ) ; } catch ( MalformedURLException e ) { throw new IOException ( StringUtils . stringifyException ( e ) ) ; } } } final LibraryManagerEntry entry = new LibraryManagerEntry ( id , requiredJarFiles , urls ) ; this . libraryManagerEntries . put ( id , entry ) ; } finally { this . lockMap . remove ( id ) ; } }
Registers a job ID with a set of library paths that are required to run the job. For every registered job the library cache manager creates a class loader that is used to instantiate the vertex's environment later on.
40,764
public void getEventsForJob ( final JobID jobID , final List < AbstractEvent > eventList , final boolean includeManagementEvents ) { synchronized ( this . collectedEvents ) { List < AbstractEvent > eventsForJob = this . collectedEvents . get ( jobID ) ; if ( eventsForJob != null ) { final Iterator < AbstractEvent > it = eventsForJob . iterator ( ) ; while ( it . hasNext ( ) ) { final AbstractEvent event = it . next ( ) ; final boolean isManagementEvent = ( event instanceof ManagementEvent ) ; if ( ! isManagementEvent || includeManagementEvents ) { eventList . add ( event ) ; } } } } }
Retrieves and adds the collected events for the job with the given job ID to the provided list .
40,765
private void addEvent ( JobID jobID , AbstractEvent event ) { synchronized ( this . collectedEvents ) { List < AbstractEvent > eventList = this . collectedEvents . get ( jobID ) ; if ( eventList == null ) { eventList = new ArrayList < AbstractEvent > ( ) ; this . collectedEvents . put ( jobID , eventList ) ; } eventList . add ( event ) ; } }
Adds an event to the job's event list.
40,766
public void registerJob ( final ExecutionGraph executionGraph , final boolean profilingAvailable , final long submissionTimestamp ) { final Iterator < ExecutionVertex > it = new ExecutionGraphIterator ( executionGraph , true ) ; while ( it . hasNext ( ) ) { final ExecutionVertex vertex = it . next ( ) ; vertex . registerExecutionListener ( new ExecutionListenerWrapper ( this , vertex ) ) ; vertex . registerVertexAssignmentListener ( new VertexAssignmentListenerWrapper ( this , executionGraph . getJobID ( ) ) ) ; } executionGraph . registerJobStatusListener ( new JobStatusListenerWrapper ( this , executionGraph . getJobName ( ) , profilingAvailable , submissionTimestamp ) ) ; }
Registers a job in form of its execution graph representation with the job progress collector . The collector will subscribe to state changes of the individual subtasks . A separate deregistration is not necessary since the job progress collector periodically discards outdated progress information .
40,767
public void run ( ) { final long currentTime = System . currentTimeMillis ( ) ; synchronized ( this . collectedEvents ) { final Iterator < JobID > it = this . collectedEvents . keySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final JobID jobID = it . next ( ) ; final List < AbstractEvent > eventList = this . collectedEvents . get ( jobID ) ; if ( eventList == null ) { continue ; } final Iterator < AbstractEvent > it2 = eventList . iterator ( ) ; while ( it2 . hasNext ( ) ) { final AbstractEvent event = it2 . next ( ) ; if ( ( event . getTimestamp ( ) + this . timerTaskInterval ) < currentTime ) { archiveEvent ( jobID , event ) ; it2 . remove ( ) ; } } if ( eventList . isEmpty ( ) ) { it . remove ( ) ; } } } synchronized ( this . recentJobs ) { final Iterator < Map . Entry < JobID , RecentJobEvent > > it = this . recentJobs . entrySet ( ) . iterator ( ) ; while ( it . hasNext ( ) ) { final Map . Entry < JobID , RecentJobEvent > entry = it . next ( ) ; final JobStatus jobStatus = entry . getValue ( ) . getJobStatus ( ) ; if ( jobStatus != JobStatus . FINISHED && jobStatus != JobStatus . CANCELED && jobStatus != JobStatus . FAILED ) { continue ; } if ( ( entry . getValue ( ) . getTimestamp ( ) + this . timerTaskInterval ) < currentTime ) { archiveJobevent ( entry . getKey ( ) , entry . getValue ( ) ) ; it . remove ( ) ; synchronized ( this . recentManagementGraphs ) { archiveManagementGraph ( entry . getKey ( ) , this . recentManagementGraphs . get ( entry . getKey ( ) ) ) ; this . recentManagementGraphs . remove ( entry . getValue ( ) ) ; } synchronized ( this . recentNetworkTopologies ) { archiveNetworkTopology ( entry . getKey ( ) , this . recentNetworkTopologies . get ( entry . getKey ( ) ) ) ; this . recentNetworkTopologies . remove ( entry . getValue ( ) ) ; } } } } }
This method will periodically be called to clean up expired collected events .
40,768
private void updateManagementGraph ( final JobID jobID , final VertexAssignmentEvent vertexAssignmentEvent ) { synchronized ( this . recentManagementGraphs ) { final ManagementGraph managementGraph = this . recentManagementGraphs . get ( jobID ) ; if ( managementGraph == null ) { return ; } final ManagementVertex vertex = managementGraph . getVertexByID ( vertexAssignmentEvent . getVertexID ( ) ) ; if ( vertex == null ) { return ; } vertex . setInstanceName ( vertexAssignmentEvent . getInstanceName ( ) ) ; vertex . setInstanceType ( vertexAssignmentEvent . getInstanceType ( ) ) ; } }
Applies changes in the vertex assignment to the stored management graph .
40,769
private void updateManagementGraph ( final JobID jobID , final ExecutionStateChangeEvent executionStateChangeEvent , String optionalMessage ) { synchronized ( this . recentManagementGraphs ) { final ManagementGraph managementGraph = this . recentManagementGraphs . get ( jobID ) ; if ( managementGraph == null ) { return ; } final ManagementVertex vertex = managementGraph . getVertexByID ( executionStateChangeEvent . getVertexID ( ) ) ; if ( vertex == null ) { return ; } vertex . setExecutionState ( executionStateChangeEvent . getNewExecutionState ( ) ) ; if ( executionStateChangeEvent . getNewExecutionState ( ) == ExecutionState . FAILED ) { vertex . setOptMessage ( optionalMessage ) ; } } }
Applies changes in the state of an execution vertex to the stored management graph .
40,770
public InputChannelResult readRecord ( T target ) throws IOException , InterruptedException { if ( this . channelToReadFrom == - 1 ) { if ( this . isClosed ( ) ) { return InputChannelResult . END_OF_STREAM ; } if ( Thread . interrupted ( ) ) { throw new InterruptedException ( ) ; } this . channelToReadFrom = waitForAnyChannelToBecomeAvailable ( ) ; } InputChannelResult result = this . getInputChannel ( this . channelToReadFrom ) . readRecord ( target ) ; switch ( result ) { case INTERMEDIATE_RECORD_FROM_BUFFER : return InputChannelResult . INTERMEDIATE_RECORD_FROM_BUFFER ; case LAST_RECORD_FROM_BUFFER : this . channelToReadFrom = - 1 ; return InputChannelResult . LAST_RECORD_FROM_BUFFER ; case END_OF_SUPERSTEP : this . channelToReadFrom = - 1 ; return InputChannelResult . END_OF_SUPERSTEP ; case TASK_EVENT : this . currentEvent = this . getInputChannel ( this . channelToReadFrom ) . getCurrentEvent ( ) ; this . channelToReadFrom = - 1 ; return InputChannelResult . TASK_EVENT ; case NONE : this . channelToReadFrom = - 1 ; return InputChannelResult . NONE ; case END_OF_STREAM : this . channelToReadFrom = - 1 ; return isClosed ( ) ? InputChannelResult . END_OF_STREAM : InputChannelResult . NONE ; default : throw new RuntimeException ( ) ; } }
Reads a record from one of the associated input channels . Channels are read such that one buffer from a channel is consecutively consumed . The buffers in turn are consumed in the order in which they arrive . Note that this method is not guaranteed to return a record because the currently available channel data may not always constitute an entire record when events or partial records are part of the data .
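A sketch of a typical consumption loop over this gate, based only on the result codes visible above; gate, target, and the process/handleEvent helpers are hypothetical:

// Hypothetical read loop:
while (true) {
    InputChannelResult result = gate.readRecord(target);
    if (result == InputChannelResult.END_OF_STREAM) {
        break; // all channels are closed
    } else if (result == InputChannelResult.INTERMEDIATE_RECORD_FROM_BUFFER
            || result == InputChannelResult.LAST_RECORD_FROM_BUFFER) {
        process(target); // a full record was deserialized into target
    } else if (result == InputChannelResult.TASK_EVENT) {
        handleEvent(gate.getCurrentEvent()); // hypothetical accessor for the stored event
    }
    // NONE / END_OF_SUPERSTEP: no record this call; loop again or end the superstep
}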
40,771
public void notifyRecordIsAvailable ( int channelIndex ) { this . availableChannels . add ( Integer . valueOf ( channelIndex ) ) ; RecordAvailabilityListener < T > listener = this . recordAvailabilityListener . get ( ) ; if ( listener != null ) { listener . reportRecordAvailability ( this ) ; } }
Notifies the gate that the channel with the given index has at least one record available.
40,772
public void close ( ) throws IOException , InterruptedException { for ( int i = 0 ; i < this . getNumberOfInputChannels ( ) ; i ++ ) { final InputChannel < T > inputChannel = this . channels [ i ] ; inputChannel . close ( ) ; } }
Immediately closes the input gate and all its input channels. The corresponding output channels are notified. Any remaining records in any buffers or queues are considered irrelevant and are discarded.
40,773
private boolean hasWindowsDrive ( String path , boolean slashed ) { if ( ! OperatingSystem . isWindows ( ) ) { return false ; } final int start = slashed ? 1 : 0 ; return path . length ( ) >= start + 2 && ( slashed ? path . charAt ( 0 ) == '/' : true ) && path . charAt ( start + 1 ) == ':' && ( ( path . charAt ( start ) >= 'A' && path . charAt ( start ) <= 'Z' ) || ( path . charAt ( start ) >= 'a' && path . charAt ( start ) <= 'z' ) ) ; }
Checks if the provided path string contains a Windows drive letter.
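For illustration, the expected results of the check above (on Windows; on other operating systems it always returns false):

// hasWindowsDrive("C:/data",  slashed=false) -> true   (drive letter at index 0)
// hasWindowsDrive("/C:/data", slashed=true)  -> true   (leading slash, drive at index 1)
// hasWindowsDrive("/data",    slashed=true)  -> false  (no drive letter)
// hasWindowsDrive("1:/data",  slashed=false) -> false  ('1' is not in [A-Za-z])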
40,774
protected SequentialStatistics createStatistics ( List < FileStatus > files , FileBaseStatistics stats ) throws IOException { if ( files . isEmpty ( ) ) { return null ; } BlockInfo blockInfo = this . createBlockInfo ( ) ; long totalCount = 0 ; for ( FileStatus file : files ) { if ( file . getLen ( ) < blockInfo . getInfoSize ( ) ) { continue ; } FSDataInputStream fdis = file . getPath ( ) . getFileSystem ( ) . open ( file . getPath ( ) , blockInfo . getInfoSize ( ) ) ; fdis . seek ( file . getLen ( ) - blockInfo . getInfoSize ( ) ) ; DataInputStream input = new DataInputStream ( fdis ) ; blockInfo . read ( input ) ; totalCount += blockInfo . getAccumulatedRecordCount ( ) ; } final float avgWidth = totalCount == 0 ? 0 : ( ( float ) stats . getTotalInputSize ( ) / totalCount ) ; return new SequentialStatistics ( stats . getLastModificationTime ( ) , stats . getTotalInputSize ( ) , avgWidth , totalCount ) ; }
Fills in the statistics. The last modification time and the total input size are prefilled.
40,775
void addGate ( final ManagementGate gate ) { if ( gate . isInputGate ( ) ) { this . inputGates . add ( gate ) ; } else { this . outputGates . add ( gate ) ; } }
Adds a management gate to this vertex .
40,776
public ManagementGate getInputGate ( final int index ) { if ( index < this . inputGates . size ( ) ) { return this . inputGates . get ( index ) ; } return null ; }
Returns the input gate at the given index .
40,777
public ManagementGate getOutputGate ( final int index ) { if ( index < this . outputGates . size ( ) ) { return this . outputGates . get ( index ) ; } return null ; }
Returns the output gate at the given index .
40,778
public String toJson ( ) { StringBuilder json = new StringBuilder ( "" ) ; json . append ( "{" ) ; json . append ( "\"vertexid\": \"" + this . getID ( ) + "\"," ) ; json . append ( "\"vertexname\": \"" + StringUtils . escapeHtml ( this . toString ( ) ) + "\"," ) ; json . append ( "\"vertexstatus\": \"" + this . getExecutionState ( ) + "\"," ) ; json . append ( "\"vertexinstancename\": \"" + this . getInstanceName ( ) + "\"," ) ; json . append ( "\"vertexinstancetype\": \"" + this . getInstanceType ( ) + "\"" ) ; json . append ( "}" ) ; return json . toString ( ) ; }
Returns a JSON representation of this ManagementVertex.
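For illustration, the resulting JSON has this shape (all field values are hypothetical):

{"vertexid": "8f4c-example-id", "vertexname": "DataSink (1/4)", "vertexstatus": "RUNNING", "vertexinstancename": "worker-1", "vertexinstancetype": "default"}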
40,779
public void eventOccurred ( AbstractTaskEvent event ) { if ( event instanceof TerminationEvent ) { terminationSignaled = true ; } else if ( event instanceof AllWorkersDoneEvent ) { AllWorkersDoneEvent wde = ( AllWorkersDoneEvent ) event ; aggregatorNames = wde . getAggregatorNames ( ) ; aggregates = wde . getAggregates ( userCodeClassLoader ) ; } else { throw new IllegalArgumentException ( "Unknown event type." ) ; } latch . countDown ( ) ; }
The barrier will release the waiting thread if an event occurs.
40,780
private void writeJsonForJobs ( PrintWriter wrt , List < RecentJobEvent > jobs ) { try { wrt . write ( "[" ) ; for ( int i = 0 ; i < jobs . size ( ) ; i ++ ) { RecentJobEvent jobEvent = jobs . get ( i ) ; writeJsonForJob ( wrt , jobEvent ) ; if ( i != jobs . size ( ) - 1 ) { wrt . write ( "," ) ; } } wrt . write ( "]" ) ; } catch ( EofException eof ) { LOG . info ( "Info server for jobmanager: Connection closed by client, EofException" ) ; } catch ( IOException ioe ) { LOG . info ( "Info server for jobmanager: Connection closed by client, IOException" ) ; } }
Writes the ManagementGraph as JSON for all recent jobs.
40,781
private void writeJsonForArchive ( PrintWriter wrt , List < RecentJobEvent > jobs ) { wrt . write ( "[" ) ; Collections . sort ( jobs , new Comparator < RecentJobEvent > ( ) { public int compare ( RecentJobEvent o1 , RecentJobEvent o2 ) { if ( o1 . getTimestamp ( ) < o2 . getTimestamp ( ) ) { return 1 ; } else { return - 1 ; } } } ) ; for ( int i = 0 ; i < jobs . size ( ) ; i ++ ) { RecentJobEvent jobEvent = jobs . get ( i ) ; wrt . write ( "{" ) ; wrt . write ( "\"jobid\": \"" + jobEvent . getJobID ( ) + "\"," ) ; wrt . write ( "\"jobname\": \"" + jobEvent . getJobName ( ) + "\"," ) ; wrt . write ( "\"status\": \"" + jobEvent . getJobStatus ( ) + "\"," ) ; wrt . write ( "\"time\": " + jobEvent . getTimestamp ( ) ) ; wrt . write ( "}" ) ; if ( i != jobs . size ( ) - 1 ) { wrt . write ( "," ) ; } } wrt . write ( "]" ) ; }
Writes JSON with a list of currently archived jobs, sorted by time.
40,782
@ SuppressWarnings ( "static-access" ) public static void main ( String [ ] args ) throws IOException { Option configDirOpt = OptionBuilder . withArgName ( "config directory" ) . hasArg ( ) . withDescription ( "Specify configuration directory." ) . create ( "configDir" ) ; Option tempDir = OptionBuilder . withArgName ( "temporary directory (overwrites configured option)" ) . hasArg ( ) . withDescription ( "Specify temporary directory." ) . create ( ARG_CONF_DIR ) ; configDirOpt . setRequired ( true ) ; tempDir . setRequired ( false ) ; Options options = new Options ( ) ; options . addOption ( configDirOpt ) ; options . addOption ( tempDir ) ; CommandLineParser parser = new GnuParser ( ) ; CommandLine line = null ; try { line = parser . parse ( options , args ) ; } catch ( ParseException e ) { System . err . println ( "CLI Parsing failed. Reason: " + e . getMessage ( ) ) ; System . exit ( FAILURE_RETURN_CODE ) ; } String configDir = line . getOptionValue ( configDirOpt . getOpt ( ) , null ) ; String tempDirVal = line . getOptionValue ( tempDir . getOpt ( ) , null ) ; GlobalConfiguration . loadConfiguration ( configDir ) ; if ( tempDirVal != null && GlobalConfiguration . getString ( ConfigConstants . TASK_MANAGER_TMP_DIR_KEY , null ) == null ) { Configuration c = GlobalConfiguration . getConfiguration ( ) ; c . setString ( ConfigConstants . TASK_MANAGER_TMP_DIR_KEY , tempDirVal ) ; LOG . info ( "Setting temporary directory to " + tempDirVal ) ; GlobalConfiguration . includeConfiguration ( c ) ; } System . err . println ( "Configuration " + GlobalConfiguration . getConfiguration ( ) ) ; LOG . info ( "Current user " + UserGroupInformation . getCurrentUser ( ) . getShortUserName ( ) ) ; { long maxMemoryMiBytes = Runtime . getRuntime ( ) . maxMemory ( ) >>> 20 ; LOG . info ( "Starting TaskManager in a JVM with " + maxMemoryMiBytes + " MiBytes maximum heap size." ) ; } try { new TaskManager ( ) ; } catch ( Exception e ) { LOG . fatal ( "Taskmanager startup failed: " + e . getMessage ( ) , e ) ; System . exit ( FAILURE_RETURN_CODE ) ; } Object mon = new Object ( ) ; synchronized ( mon ) { try { mon . wait ( ) ; } catch ( InterruptedException ex ) { } } }
Entry point for the program .
40,783
private void runHeartbeatLoop ( ) { final long interval = GlobalConfiguration . getInteger ( ConfigConstants . TASK_MANAGER_HEARTBEAT_INTERVAL_KEY , ConfigConstants . DEFAULT_TASK_MANAGER_HEARTBEAT_INTERVAL ) ; while ( ! shutdownStarted . get ( ) ) { try { LOG . debug ( "heartbeat" ) ; this . jobManager . sendHeartbeat ( this . localInstanceConnectionInfo , this . hardwareDescription ) ; } catch ( IOException e ) { if ( shutdownStarted . get ( ) ) { break ; } else { LOG . error ( "Sending the heart beat caused an exception: " + e . getMessage ( ) , e ) ; } } try { Thread . sleep ( interval ) ; } catch ( InterruptedException e ) { if ( ! shutdownStarted . get ( ) ) { LOG . error ( "TaskManager heart beat loop was interrupted without shutdown." ) ; } } } }
This method sends the periodic heartbeats.
40,784
private InetAddress getTaskManagerAddress ( InetSocketAddress jobManagerAddress ) throws IOException { AddressDetectionState strategy = AddressDetectionState . ADDRESS ; while ( true ) { Enumeration < NetworkInterface > e = NetworkInterface . getNetworkInterfaces ( ) ; while ( e . hasMoreElements ( ) ) { NetworkInterface n = e . nextElement ( ) ; Enumeration < InetAddress > ee = n . getInetAddresses ( ) ; while ( ee . hasMoreElements ( ) ) { InetAddress i = ee . nextElement ( ) ; switch ( strategy ) { case ADDRESS : if ( hasCommonPrefix ( jobManagerAddress . getAddress ( ) . getAddress ( ) , i . getAddress ( ) ) ) { if ( tryToConnect ( i , jobManagerAddress , strategy . getTimeout ( ) ) ) { LOG . info ( "Determined " + i + " as the TaskTracker's own IP address" ) ; return i ; } } break ; case FAST_CONNECT : case SLOW_CONNECT : boolean correct = tryToConnect ( i , jobManagerAddress , strategy . getTimeout ( ) ) ; if ( correct ) { LOG . info ( "Determined " + i + " as the TaskTracker's own IP address" ) ; return i ; } break ; default : throw new RuntimeException ( "Unkown address detection strategy: " + strategy ) ; } } } switch ( strategy ) { case ADDRESS : strategy = AddressDetectionState . FAST_CONNECT ; break ; case FAST_CONNECT : strategy = AddressDetectionState . SLOW_CONNECT ; break ; case SLOW_CONNECT : throw new RuntimeException ( "The TaskManager failed to detect its own IP address" ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Defaulting to detection strategy " + strategy ) ; } } }
Find out the TaskManager 's own IP address .
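The ADDRESS strategy above calls a hasCommonPrefix helper that is not shown in this snippet ; the following is a hedged sketch of what such a check might look like , assuming it merely compares the leading byte of the two raw addresses :

// Hypothetical sketch of the helper; a real implementation might compare a
// longer prefix or consult the network interface's subnet mask instead.
private static boolean hasCommonPrefix(byte[] address1, byte[] address2) {
    return address1 != null && address2 != null
        && address1.length > 0 && address2.length > 0
        && address1[0] == address2[0];
}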
40,785
private Task createAndRegisterTask ( final ExecutionVertexID id , final Configuration jobConfiguration , final RuntimeEnvironment environment ) throws InsufficientResourcesException , IOException { if ( id == null ) { throw new IllegalArgumentException ( "Argument id is null" ) ; } if ( environment == null ) { throw new IllegalArgumentException ( "Argument environment is null" ) ; } Task task ; synchronized ( this ) { final Task runningTask = this . runningTasks . get ( id ) ; boolean registerTask = true ; if ( runningTask == null ) { task = new Task ( id , environment , this ) ; } else { if ( runningTask instanceof Task ) { return null ; } else { task = runningTask ; registerTask = false ; } } if ( registerTask ) { this . channelManager . register ( task ) ; boolean enableProfiling = false ; if ( this . profiler != null && jobConfiguration . getBoolean ( ProfilingUtils . PROFILE_JOB_KEY , true ) ) { enableProfiling = true ; } if ( enableProfiling ) { task . registerProfiler ( this . profiler , jobConfiguration ) ; } this . runningTasks . put ( id , task ) ; } } return task ; }
Registers a newly incoming runtime task with the task manager .
40,786
private void unregisterTask ( final ExecutionVertexID id ) { synchronized ( this ) { final Task task = this . runningTasks . remove ( id ) ; if ( task == null ) { LOG . error ( "Cannot find task with ID " + id + " to unregister" ) ; return ; } for ( Entry < String , String > e : DistributedCache . getCachedFile ( task . getEnvironment ( ) . getJobConfiguration ( ) ) ) { this . fileCache . deleteTmpFile ( e . getKey ( ) , task . getJobID ( ) ) ; } this . channelManager . unregister ( id , task ) ; task . unregisterProfiler ( this . profiler ) ; task . unregisterMemoryManager ( this . memoryManager ) ; try { LibraryCacheManager . unregister ( task . getJobID ( ) ) ; } catch ( IOException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Unregistering the job vertex ID " + id + " caused an IOException" ) ; } } } }
Unregisters a finished or aborted task .
40,787
public void shutdown ( ) { if ( ! this . shutdownStarted . compareAndSet ( false , true ) ) { return ; } LOG . info ( "Shutting down TaskManager" ) ; this . heartbeatThread . interrupt ( ) ; try { this . heartbeatThread . join ( 1000 ) ; } catch ( InterruptedException e ) { } RPC . stopProxy ( this . jobManager ) ; RPC . stopProxy ( this . globalInputSplitProvider ) ; RPC . stopProxy ( this . lookupService ) ; RPC . stopProxy ( this . accumulatorProtocolProxy ) ; this . taskManagerServer . stop ( ) ; if ( this . profiler != null ) { this . profiler . shutdown ( ) ; } this . channelManager . shutdown ( ) ; if ( this . ioManager != null ) { this . ioManager . shutdown ( ) ; } if ( this . memoryManager != null ) { this . memoryManager . shutdown ( ) ; } this . fileCache . shutdown ( ) ; if ( this . executorService != null ) { this . executorService . shutdown ( ) ; try { this . executorService . awaitTermination ( 5000L , TimeUnit . MILLISECONDS ) ; } catch ( InterruptedException e ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( e ) ; } } } this . shutdownComplete = true ; }
Shuts the task manager down .
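The compareAndSet guard at the top is what makes this shutdown safe to call more than once ; a stripped-down sketch of that pattern , pure JDK with hypothetical names :

import java.util.concurrent.atomic.AtomicBoolean;

public class ShutdownOnceSketch {
    private final AtomicBoolean shutdownStarted = new AtomicBoolean(false);

    public void shutdown() {
        // Only the first caller flips the flag; every later call returns
        // immediately, so the teardown below runs at most once even when
        // shutdown() is invoked concurrently from several threads.
        if (!shutdownStarted.compareAndSet(false, true)) {
            return;
        }
        // ... stop RPC proxies, channel manager, I/O and memory managers, etc. ...
    }
}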
40,788
private static final void checkTempDirs ( final String [ ] tempDirs ) throws Exception { for ( int i = 0 ; i < tempDirs . length ; ++ i ) { final String dir = tempDirs [ i ] ; if ( dir == null ) { throw new Exception ( "Temporary file directory #" + ( i + 1 ) + " is null." ) ; } final File f = new File ( dir ) ; if ( ! f . exists ( ) ) { throw new Exception ( "Temporary file directory '" + f . getAbsolutePath ( ) + "' does not exist." ) ; } if ( ! f . isDirectory ( ) ) { throw new Exception ( "Temporary file directory '" + f . getAbsolutePath ( ) + "' is not a directory." ) ; } if ( ! f . canWrite ( ) ) { throw new Exception ( "Temporary file directory '" + f . getAbsolutePath ( ) + "' is not writable." ) ; } } }
Checks whether the given strings describe existing directories that are writable . If that is not the case , an exception is raised .
40,789
private static void logVersionInformation ( ) { String version = JobManager . class . getPackage ( ) . getImplementationVersion ( ) ; String revision = "<unknown>" ; try { Properties properties = new Properties ( ) ; InputStream propFile = JobManager . class . getClassLoader ( ) . getResourceAsStream ( ".version.properties" ) ; if ( propFile != null ) { properties . load ( propFile ) ; revision = properties . getProperty ( "git.commit.id.abbrev" ) ; } } catch ( IOException e ) { LOG . info ( "Cannot determine code revision. Unable to read version property file." ) ; } LOG . info ( "Starting Stratosphere JobManager (Version: " + version + ", Rev:" + revision + ")" ) ; }
Logs Stratosphere version information .
40,790
public static void main ( String [ ] args ) { if ( System . getProperty ( "log4j.configuration" ) == null ) { Logger root = Logger . getRootLogger ( ) ; root . removeAllAppenders ( ) ; PatternLayout layout = new PatternLayout ( "%d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n" ) ; ConsoleAppender appender = new ConsoleAppender ( layout , "System.err" ) ; root . addAppender ( appender ) ; root . setLevel ( Level . INFO ) ; } JobManager jobManager ; try { jobManager = initialize ( args ) ; jobManager . startInfoServer ( ) ; } catch ( Exception e ) { LOG . fatal ( e . getMessage ( ) , e ) ; System . exit ( FAILURE_RETURN_CODE ) ; } Object w = new Object ( ) ; synchronized ( w ) { try { w . wait ( ) ; } catch ( InterruptedException e ) { } } }
Entry point for the program .
40,791
private TaskCancelResult cancelJob ( final ExecutionGraph eg ) { TaskCancelResult errorResult = null ; final Iterator < ExecutionVertex > it = new ExecutionGraphIterator ( eg , eg . getIndexOfCurrentExecutionStage ( ) , false , true ) ; while ( it . hasNext ( ) ) { final ExecutionVertex vertex = it . next ( ) ; final TaskCancelResult result = vertex . cancelTask ( ) ; if ( result . getReturnCode ( ) != AbstractTaskResult . ReturnCode . SUCCESS ) { errorResult = result ; } } return errorResult ; }
Cancels all the tasks in the current and upper stages of the given execution graph .
40,792
public ManagementGraph getManagementGraph ( final JobID jobID ) throws IOException { ManagementGraph mg = this . eventCollector . getManagementGraph ( jobID ) ; if ( mg == null ) { if ( this . archive != null ) { mg = this . archive . getManagementGraph ( jobID ) ; } if ( mg == null ) { throw new IOException ( "Cannot find job with ID " + jobID ) ; } } return mg ; }
Returns the current ManagementGraph from the eventCollector or , if it is not available there , from the archive .
40,793
public void startInfoServer ( ) { final Configuration config = GlobalConfiguration . getConfiguration ( ) ; try { int port = config . getInteger ( ConfigConstants . JOB_MANAGER_WEB_PORT_KEY , ConfigConstants . DEFAULT_JOB_MANAGER_WEB_FRONTEND_PORT ) ; server = new WebInfoServer ( config , port , this ) ; server . start ( ) ; } catch ( FileNotFoundException e ) { LOG . error ( e . getMessage ( ) , e ) ; } catch ( Exception e ) { LOG . error ( "Cannot instantiate info server: " + e . getMessage ( ) , e ) ; } }
Starts the Jetty info server for the JobManager .
40,794
public < T0 , T1 , T2 > DataSource < Tuple3 < T0 , T1 , T2 > > types ( Class < T0 > type0 , Class < T1 > type1 , Class < T2 > type2 ) { TupleTypeInfo < Tuple3 < T0 , T1 , T2 > > types = TupleTypeInfo . getBasicTupleTypeInfo ( type0 , type1 , type2 ) ; CsvInputFormat < Tuple3 < T0 , T1 , T2 > > inputFormat = new CsvInputFormat < Tuple3 < T0 , T1 , T2 > > ( path ) ; configureInputFormat ( inputFormat , type0 , type1 , type2 ) ; return new DataSource < Tuple3 < T0 , T1 , T2 > > ( executionContext , inputFormat , types ) ; }
Specifies the types for the CSV fields . This method parses the CSV data into a 3 - tuple whose fields have the specified types . This method is overloaded for each possible length of the tuple to support type - safe creation of data sets through CSV parsing .
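A usage sketch under stated assumptions : the readCsvFile entry point , the package names , and the example path and field types follow the Stratosphere-era Java API and are not taken from this snippet .

import eu.stratosphere.api.java.DataSet;
import eu.stratosphere.api.java.ExecutionEnvironment;
import eu.stratosphere.api.java.tuple.Tuple3;

public class CsvTypesSketch {
    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        // types(...) fixes the type of each CSV field and yields records as Tuple3
        DataSet<Tuple3<String, Integer, Double>> input =
            env.readCsvFile("file:///path/to/data.csv")
               .types(String.class, Integer.class, Double.class);
        input.print();
        env.execute("CSV types sketch");
    }
}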
40,795
public Ordering appendOrdering ( Integer index , Class < ? extends Key < ? > > type , Order order ) { if ( index . intValue ( ) < 0 ) { throw new IllegalArgumentException ( "The key index must not be negative." ) ; } if ( order == null ) { throw new NullPointerException ( ) ; } if ( order == Order . NONE ) { throw new IllegalArgumentException ( "An ordering must not be created with a NONE order." ) ; } this . indexes . add ( index ) ; this . types . add ( type ) ; this . orders . add ( order ) ; return this ; }
Extends this ordering by appending an additional order requirement .
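A usage sketch ; the no-argument Ordering constructor and the IntValue / StringValue key classes come from the surrounding framework and are assumptions here :

// Composite sort order: ascending on key field 0, then descending on field 2.
// appendOrdering returns this, so the calls chain.
Ordering ordering = new Ordering()
    .appendOrdering(0, IntValue.class, Order.ASCENDING)
    .appendOrdering(2, StringValue.class, Order.DESCENDING);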
40,796
public void setHashPartitioned ( FieldList partitionedFields ) { this . partitioning = PartitioningProperty . HASH_PARTITIONED ; this . partitioningFields = partitionedFields ; this . ordering = null ; }
Sets the partitioning property for the global properties .
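A usage sketch ; the single-int FieldList constructor is an assumption about the optimizer's field classes :

// Record that the data is hash-partitioned on field 0. As the setter shows,
// any previously tracked ordering is discarded, because hash partitioning
// carries no order guarantee.
GlobalProperties gp = new GlobalProperties();
gp.setHashPartitioned(new FieldList(0));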
40,797
public GlobalProperties filterByNodesConstantSet ( OptimizerNode node , int input ) { if ( this . ordering != null ) { for ( int col : this . ordering . getInvolvedIndexes ( ) ) { if ( ! node . isFieldConstant ( input , col ) ) { return new GlobalProperties ( ) ; } } } if ( this . partitioningFields != null ) { for ( int colIndex : this . partitioningFields ) { if ( ! node . isFieldConstant ( input , colIndex ) ) { return new GlobalProperties ( ) ; } } } if ( this . uniqueFieldCombinations != null ) { HashSet < FieldSet > newSet = new HashSet < FieldSet > ( ) ; newSet . addAll ( this . uniqueFieldCombinations ) ; for ( Iterator < FieldSet > combos = newSet . iterator ( ) ; combos . hasNext ( ) ; ) { FieldSet current = combos . next ( ) ; for ( Integer field : current ) { if ( ! node . isFieldConstant ( input , field ) ) { combos . remove ( ) ; break ; } } } if ( newSet . size ( ) != this . uniqueFieldCombinations . size ( ) ) { GlobalProperties gp = clone ( ) ; gp . uniqueFieldCombinations = newSet . isEmpty ( ) ? null : newSet ; return gp ; } } if ( this . partitioning == PartitioningProperty . FULL_REPLICATION ) { return new GlobalProperties ( ) ; } return this ; }
Filters these properties by what can be preserved through the given output contract .
40,798
public DeltaIteration < ST , WT > parallelism ( int parallelism ) { Validate . isTrue ( parallelism > 0 || parallelism == - 1 , "The degree of parallelism must be positive, or -1 (use default)." ) ; this . parallelism = parallelism ; return this ; }
Sets the degree of parallelism for the iteration .
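A usage sketch ; the iterateDelta call ( workset , 10 supersteps , key position 0 ) and the two input data sets are assumptions modeled on the era's delta-iteration API :

// Cap the iteration at parallelism 16. Per the validation above, any positive
// value is accepted, and -1 falls back to the environment's default.
DeltaIteration<Tuple2<Long, Double>, Tuple2<Long, Double>> iteration =
    solutionSetInput.iterateDelta(worksetInput, 10, 0)
                    .parallelism(16);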
40,799
private void showErrorPage ( HttpServletResponse resp , String message ) throws IOException { resp . setStatus ( HttpServletResponse . SC_OK ) ; resp . setContentType ( GUIServletStub . CONTENT_TYPE_HTML ) ; PrintWriter writer = resp . getWriter ( ) ; writer . println ( "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\"\n \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">" ) ; writer . println ( "<html>" ) ; writer . println ( "<head>" ) ; writer . println ( " <title>Launch Job - Error</title>" ) ; writer . println ( " <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\" />" ) ; writer . println ( " <link rel=\"stylesheet\" type=\"text/css\" href=\"css/nephelefrontend.css\" />" ) ; writer . println ( "</head>" ) ; writer . println ( "<body>" ) ; writer . println ( " <div class=\"mainHeading\">" ) ; writer . println ( " <h1><img src=\"img/StratosphereLogo.png\" width=\"326\" height=\"100\" alt=\"Stratosphere Logo\" align=\"middle\"/>Nephele and PACTs Query Interface</h1>" ) ; writer . println ( " </div>" ) ; writer . println ( " <div style=\"margin-top: 50px; text-align: center;\">" ) ; writer . println ( " <p class=\"error_text\" style=\"font-size: 18px;\">" ) ; writer . println ( message ) ; writer . println ( " </p><br/><br/>" ) ; writer . println ( " <form action=\"launch.html\" method=\"GET\">" ) ; writer . println ( " <input type=\"submit\" value=\"back\">" ) ; writer . println ( " </form>" ) ; writer . println ( " </div>" ) ; writer . println ( "</body>" ) ; writer . println ( "</html>" ) ; }
Prints the error page containing the given message .