idx
int64
0
41.2k
question
stringlengths
74
4.21k
target
stringlengths
5
888
36,500
private void submitFailureSlaEvent ( Dataset dataset , String eventName ) { try { CompactionSlaEventHelper . getEventSubmitterBuilder ( dataset , Optional . < Job > absent ( ) , this . fs ) . eventSubmitter ( this . eventSubmitter ) . eventName ( eventName ) . build ( ) . submit ( ) ; } catch ( Throwable t ) { LOG . wa...
Submit a failure sla event
36,501
public void close ( ) throws IOException { try { this . running = false ; this . accumulator . close ( ) ; if ( ! this . service . awaitTermination ( 60 , TimeUnit . SECONDS ) ) { forceClose ( ) ; } else { LOG . info ( "Closed properly: elapsed " + ( System . currentTimeMillis ( ) - startTime ) + " milliseconds" ) ; } ...
Close all the resources this will be blocked until all the request are sent and gets acknowledged
36,502
public boolean getTokens ( long tokens , long timeout , TimeUnit timeoutUnit ) throws InterruptedException { long timeoutMillis = timeoutUnit . toMillis ( timeout ) ; long wait = tryReserveTokens ( tokens , timeoutMillis ) ; if ( wait < 0 ) { return false ; } if ( wait == 0 ) { return true ; } Thread . sleep ( wait ) ;...
Attempt to get the specified amount of tokens within the specified timeout . If the tokens cannot be retrieved in the specified timeout the call will return false immediately otherwise the call will block until the tokens are available .
36,503
protected void addLineageSourceInfo ( WorkUnit workUnit , State state ) { if ( ! lineageInfo . isPresent ( ) ) { log . info ( "Lineage is not enabled" ) ; return ; } String platform = state . getProp ( ConfigurationKeys . SOURCE_FILEBASED_PLATFORM , DatasetConstants . PLATFORM_HDFS ) ; Path dataDir = new Path ( state ....
Add lineage source info to a single work unit
36,504
public List < String > getcurrentFsSnapshot ( State state ) { List < String > results = new ArrayList < > ( ) ; String path = getLsPattern ( state ) ; try { log . info ( "Running ls command with input " + path ) ; results = this . fsHelper . ls ( path ) ; for ( int i = 0 ; i < results . size ( ) ; i ++ ) { URI uri = ne...
This method is responsible for connecting to the source and taking a snapshot of the folder where the data is present it then returns a list of the files in String format
36,505
private boolean checkDataQuality ( Optional < Object > schema ) throws Exception { if ( this . branches > 1 ) { this . forkTaskState . setProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXPECTED , this . taskState . getProp ( ConfigurationKeys . EXTRACTOR_ROWS_EXPECTED ) ) ; this . forkTaskState . setProp ( ConfigurationKeys...
Check data quality .
36,506
private void commitData ( ) throws IOException { if ( this . writer . isPresent ( ) ) { this . writer . get ( ) . commit ( ) ; } try { if ( GobblinMetrics . isEnabled ( this . taskState . getWorkunit ( ) ) ) { updateRecordMetrics ( ) ; updateByteMetrics ( ) ; } } catch ( IOException ioe ) { this . logger . error ( "Fai...
Commit task data .
36,507
private LinkedList < T > computeRecursiveTraversal ( T node , NodePath < T > nodePath ) { try { LinkedList < T > imports = new LinkedList < > ( ) ; Set < T > alreadyIncludedImports = new HashSet < > ( ) ; for ( T neighbor : this . traversalFunction . apply ( node ) ) { nodePath . appendNode ( neighbor ) ; addSubtravers...
Actually compute the traversal if it is not in the cache .
36,508
private void addSubtraversal ( T node , LinkedList < T > imports , Set < T > alreadyIncludedImports , NodePath < T > nodePath ) throws ExecutionException { if ( addNodeIfNotAlreadyIncluded ( node , imports , alreadyIncludedImports ) ) { for ( T inheritedFromParent : doTraverseGraphRecursively ( node , nodePath ) ) { ad...
Add a sub - traversal for a neighboring node .
36,509
private boolean addNodeIfNotAlreadyIncluded ( T thisImport , LinkedList < T > imports , Set < T > alreadyIncludedImports ) { if ( alreadyIncludedImports . contains ( thisImport ) ) { return false ; } imports . add ( thisImport ) ; alreadyIncludedImports . add ( thisImport ) ; return true ; }
Only add node to traversal if it is not already included in it .
36,510
private RuntimeException unpackExecutionException ( Throwable exc ) { while ( exc instanceof ExecutionException || exc instanceof UncheckedExecutionException ) { exc = exc . getCause ( ) ; } return Throwables . propagate ( exc ) ; }
Due to recursive nature of algorithm we may end up with multiple layers of exceptions . Unpack them .
36,511
public URI getListeningURI ( ) { try { return new URI ( this . serverUri . getScheme ( ) , this . serverUri . getUserInfo ( ) , this . serverUri . getHost ( ) , this . port , null , null , null ) ; } catch ( URISyntaxException use ) { throw new RuntimeException ( "Invalid URI. This is an error in code." , use ) ; } }
Get the scheme and authority at which this server is listening .
36,512
public void publishData ( Collection < ? extends WorkUnitState > states ) throws IOException { LOG . info ( "Start publishing data" ) ; int branches = this . state . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) ; Set < String > emptiedDestTables = Sets . newHashSet ( ) ; final Connection conn = createConn...
1 . Truncate destination table if requested 2 . Move data from staging to destination 3 . Update Workunit state
36,513
public synchronized static void setJobSpecificOutputPaths ( State state ) { if ( ! StringUtils . containsIgnoreCase ( state . getProp ( ConfigurationKeys . WRITER_STAGING_DIR ) , state . getProp ( ConfigurationKeys . JOB_ID_KEY ) ) ) { state . setProp ( ConfigurationKeys . WRITER_STAGING_DIR , new Path ( state . getPro...
Each job gets its own task - staging and task - output directory . Update the staging and output directories to contain job_id . This is to make sure uncleaned data from previous execution does not corrupt final published data produced by this execution .
36,514
public void onFileChange ( Path path ) { String fileExtension = path . getName ( ) . substring ( path . getName ( ) . lastIndexOf ( '.' ) + 1 ) ; if ( fileExtension . equalsIgnoreCase ( SchedulerUtils . JOB_PROPS_FILE_EXTENSION ) ) { LOG . info ( "Detected change to common properties file " + path . toString ( ) ) ; lo...
Called when a job configuration file is changed .
36,515
private boolean checkCommonPropExistance ( Path rootPath , String noExtFileName ) throws IOException { Configuration conf = new Configuration ( ) ; FileStatus [ ] children = rootPath . getFileSystem ( conf ) . listStatus ( rootPath ) ; for ( FileStatus aChild : children ) { if ( aChild . getPath ( ) . getName ( ) . con...
Given the target rootPath check if there s common properties existed . Return false if so .
36,516
public Schema getSchemaByKey ( String key ) throws SchemaRegistryException { try { return cachedSchemasByKeys . get ( key ) ; } catch ( ExecutionException e ) { throw new SchemaRegistryException ( String . format ( "Schema with key %s cannot be retrieved" , key ) , e ) ; } }
Get schema from schema registry by key
36,517
protected Schema fetchSchemaByKey ( String key ) throws SchemaRegistryException { String schemaUrl = KafkaAvroSchemaRegistry . this . url + GET_RESOURCE_BY_ID + key ; GetMethod get = new GetMethod ( schemaUrl ) ; int statusCode ; String schemaString ; HttpClient httpClient = this . borrowClient ( ) ; try { statusCode =...
Fetch schema by key .
36,518
public static String getDatasetUri ( Table table ) { return HIVE_DATASETS_CONFIG_PREFIX + table . getDbName ( ) + Path . SEPARATOR + table . getTableName ( ) ; }
Get the dataset uri for a hive db and table . The uri is relative to the store uri .
36,519
public final Future < RecordMetadata > enqueue ( D record , WriteCallback callback ) throws InterruptedException { final ReentrantLock lock = this . dqLock ; lock . lock ( ) ; try { BytesBoundedBatch last = dq . peekLast ( ) ; if ( last != null ) { Future < RecordMetadata > future = null ; try { future = last . tryAppe...
Add a data to internal deque data structure
36,520
public void flush ( ) { try { ArrayList < Batch > batches = this . incomplete . all ( ) ; int numOutstandingRecords = 0 ; for ( Batch batch : batches ) { numOutstandingRecords += batch . getRecords ( ) . size ( ) ; } LOG . debug ( "Flush called on {} batches with {} records total" , batches . size ( ) , numOutstandingR...
This will block until all the incomplete batches are acknowledged
36,521
public static TopologySpec . Builder builder ( URI catalogURI , Properties topologyProps ) { String name = topologyProps . getProperty ( ConfigurationKeys . TOPOLOGY_NAME_KEY ) ; String group = topologyProps . getProperty ( ConfigurationKeys . TOPOLOGY_GROUP_KEY , "default" ) ; try { URI topologyURI = new URI ( catalog...
Creates a builder for the TopologySpec based on values in a topology properties config .
36,522
public JsonSchema getValuesWithinDataType ( ) { JsonElement element = this . getDataType ( ) . get ( MAP_ITEMS_KEY ) ; if ( element . isJsonObject ( ) ) { return new JsonSchema ( element . getAsJsonObject ( ) ) ; } if ( element . isJsonArray ( ) ) { return new JsonSchema ( element . getAsJsonArray ( ) ) ; } if ( elemen...
Fetches dataType . values from the JsonObject
36,523
public Type getTypeOfArrayItems ( ) throws DataConversionException { JsonSchema arrayValues = getItemsWithinDataType ( ) ; if ( arrayValues == null ) { throw new DataConversionException ( "Array types only allow values as primitive, null or JsonObject" ) ; } return arrayValues . getType ( ) ; }
Fetches the nested or primitive array items type from schema .
36,524
private String getTargetColumnName ( String sourceColumnName , String alias ) { String targetColumnName = alias ; Schema obj = this . getMetadataColumnMap ( ) . get ( sourceColumnName . toLowerCase ( ) ) ; if ( obj == null ) { targetColumnName = ( targetColumnName == null ? "unknown" + this . unknownColumnCounter : tar...
Get target column name if column is not found in metadata then name it as unknown column If alias is not found target column is nothing but source column
36,525
private void buildMetadataColumnMap ( JsonArray array ) { if ( array != null ) { for ( JsonElement columnElement : array ) { Schema schemaObj = gson . fromJson ( columnElement , Schema . class ) ; String columnName = schemaObj . getColumnName ( ) ; this . metadataColumnMap . put ( columnName . toLowerCase ( ) , schemaO...
Build metadata column map with column name and column schema object . Build metadata column list with list columns in metadata
36,526
private void updateDeltaFieldConfig ( String srcColumnName , String tgtColumnName ) { if ( this . workUnitState . contains ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY ) ) { String watermarkCol = this . workUnitState . getProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY ) ; this . workUnitState . setProp ( Configu...
Update water mark column property if there is an alias defined in query
36,527
private void updatePrimaryKeyConfig ( String srcColumnName , String tgtColumnName ) { if ( this . workUnitState . contains ( ConfigurationKeys . EXTRACT_PRIMARY_KEY_FIELDS_KEY ) ) { String primarykey = this . workUnitState . getProp ( ConfigurationKeys . EXTRACT_PRIMARY_KEY_FIELDS_KEY ) ; this . workUnitState . setProp...
Update primary key column property if there is an alias defined in query
36,528
private void parseInputQuery ( String query ) { List < String > projectedColumns = new ArrayList < > ( ) ; if ( StringUtils . isNotBlank ( query ) ) { String queryLowerCase = query . toLowerCase ( ) ; int startIndex = queryLowerCase . indexOf ( "select " ) + 7 ; int endIndex = queryLowerCase . indexOf ( " from " ) ; if...
Parse query provided in pull file Set input column projection - column projection in the input query Set columnAlias map - column and its alias mentioned in input query
36,529
private CommandOutput < ? , ? > executeSql ( List < Command > cmds ) { String query = null ; int fetchSize = 0 ; for ( Command cmd : cmds ) { if ( cmd instanceof JdbcCommand ) { JdbcCommandType type = ( JdbcCommandType ) cmd . getCommandType ( ) ; switch ( type ) { case QUERY : query = cmd . getParams ( ) . get ( 0 ) ;...
Execute query using JDBC simple Statement Set fetch size
36,530
private CommandOutput < ? , ? > executePreparedSql ( List < Command > cmds ) { String query = null ; List < String > queryParameters = null ; int fetchSize = 0 ; for ( Command cmd : cmds ) { if ( cmd instanceof JdbcCommand ) { JdbcCommandType type = ( JdbcCommandType ) cmd . getCommandType ( ) ; switch ( type ) { case ...
Execute query using JDBC PreparedStatement to pass query parameters Set fetch size
36,531
protected JdbcProvider createJdbcSource ( ) { String driver = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_DRIVER ) ; String userName = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_USERNAME ) ; String password = PasswordManager . getInstance ( this . workUnitState ) . readPasswor...
Create JDBC source to get connection
36,532
protected String concatPredicates ( List < Predicate > predicateList ) { List < String > conditions = new ArrayList < > ( ) ; for ( Predicate predicate : predicateList ) { conditions . add ( predicate . getCondition ( ) ) ; } return Joiner . on ( " and " ) . skipNulls ( ) . join ( conditions ) ; }
Concatenate all predicates with and clause
36,533
private JsonObject getDefaultWatermark ( ) { Schema schema = new Schema ( ) ; String dataType ; String columnName = "derivedwatermarkcolumn" ; schema . setColumnName ( columnName ) ; WatermarkType wmType = WatermarkType . valueOf ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_WATERMARK_TYPE , ...
Schema of default watermark column - required if there are multiple watermarks
36,534
private Schema getCustomColumnSchema ( String columnName ) { Schema schema = new Schema ( ) ; String dataType = "string" ; schema . setColumnName ( columnName ) ; String elementDataType = "string" ; List < String > mapSymbols = null ; JsonObject newDataType = this . convertDataType ( columnName , dataType , elementData...
Schema of a custom column - required if column not found in metadata
36,535
public static boolean hasJoinOperation ( String selectQuery ) { if ( selectQuery == null || selectQuery . length ( ) == 0 ) { return false ; } SqlParser sqlParser = SqlParser . create ( selectQuery ) ; try { SqlNode all = sqlParser . parseQuery ( ) ; SqlSelect query ; if ( all instanceof SqlSelect ) { query = ( SqlSele...
Check if the SELECT query has join operation
36,536
private String toCase ( String targetColumnName ) { String columnName = targetColumnName ; ColumnNameCase caseType = ColumnNameCase . valueOf ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_COLUMN_NAME_CASE , ConfigurationKeys . DEFAULT_COLUMN_NAME_CASE ) . toUpperCase ( ) ) ; switch ( caseType ) { case T...
Change the column name case to upper lower or nochange ; Default nochange
36,537
private GlobalOptions createGlobalOptions ( CommandLine parsedOpts ) { String host = parsedOpts . hasOption ( HOST_OPT ) ? parsedOpts . getOptionValue ( HOST_OPT ) : DEFAULT_REST_SERVER_HOST ; int port = DEFAULT_REST_SERVER_PORT ; try { if ( parsedOpts . hasOption ( PORT_OPT ) ) { port = Integer . parseInt ( parsedOpts...
Build the GlobalOptions information from the raw parsed options
36,538
public String replacePortTokens ( String value ) { BiMap < String , Optional < Integer > > portMappings = HashBiMap . create ( ) ; Matcher regexMatcher = PORT_REGEX . matcher ( value ) ; while ( regexMatcher . find ( ) ) { String token = regexMatcher . group ( 0 ) ; if ( ! portMappings . containsKey ( token ) ) { Optio...
Replaces any port tokens in the specified string .
36,539
protected Schema generateSchemaWithNullifiedField ( WorkUnitState workUnitState , Schema currentAvroSchema ) { Configuration conf = new Configuration ( ) ; for ( String key : workUnitState . getPropertyNames ( ) ) { conf . set ( key , workUnitState . getProp ( key ) ) ; } Path originalSchemaPath = null ; if ( workUnitS...
Generate new avro schema by nullifying fields that previously existed but not in the current schema .
36,540
public static void setWorkUnitGuid ( State state , Guid guid ) { state . setProp ( WORK_UNIT_GUID , guid . toString ( ) ) ; }
Set a unique replicable guid for this work unit . Used for recovering partially successful work units .
36,541
protected Schema getFieldSchema ( GenericRecord record , String schemaIdLocation ) throws Exception { Optional < Object > schemaIdValue = AvroUtils . getFieldValue ( record , schemaIdLocation ) ; if ( ! schemaIdValue . isPresent ( ) ) { throw new Exception ( "Schema id with key " + schemaIdLocation + " not found in the...
Get the schema of a field
36,542
protected byte [ ] getPayloadBytes ( GenericRecord inputRecord ) { try { return getFieldAsBytes ( inputRecord , payloadField ) ; } catch ( Exception e ) { return null ; } }
Get payload field and convert to byte array
36,543
protected byte [ ] getFieldAsBytes ( GenericRecord record , String fieldLocation ) throws Exception { Optional < Object > bytesValue = AvroUtils . getFieldValue ( record , fieldLocation ) ; if ( ! bytesValue . isPresent ( ) ) { throw new Exception ( "Bytes value with key " + fieldLocation + " not found in the record" )...
Get field value byte array
36,544
protected P upConvertPayload ( GenericRecord inputRecord ) throws DataConversionException { try { Schema payloadSchema = getPayloadSchema ( inputRecord ) ; latestPayloadReader . setSchema ( payloadSchema ) ; byte [ ] payloadBytes = getPayloadBytes ( inputRecord ) ; Decoder decoder = DecoderFactory . get ( ) . binaryDec...
Convert the payload in the input record to a deserialized object with the latest schema
36,545
protected void afterFork ( List < Boolean > forks , long startTimeNanos ) { int forksGenerated = 0 ; for ( Boolean fork : forks ) { forksGenerated += fork ? 1 : 0 ; } Instrumented . markMeter ( this . outputForks , forksGenerated ) ; Instrumented . updateTimer ( this . forkOperatorTimer , System . nanoTime ( ) - startT...
Called after forkDataRecord .
36,546
@ SuppressWarnings ( "unchecked" ) public D readRecordImpl ( D reuse ) throws DataRecordException , IOException { if ( this . shutdownRequested . get ( ) ) { return null ; } long readStartTime = System . nanoTime ( ) ; while ( ! allPartitionsFinished ( ) ) { if ( currentPartitionFinished ( ) ) { moveToNextPartition ( )...
Return the next decodable record from the current partition . If the current partition has no more decodable record move on to the next partition . If all partitions have been processed return null .
36,547
private void moveToNextPartition ( ) { if ( this . currentPartitionIdx == INITIAL_PARTITION_IDX ) { LOG . info ( "Pulling topic " + this . topicName ) ; this . currentPartitionIdx = 0 ; } else { updateStatisticsForCurrentPartition ( ) ; this . currentPartitionIdx ++ ; this . currentPartitionRecordCount = 0 ; this . cur...
Record the avg time per record for the current partition then increment this . currentPartitionIdx and switch metric context to the new partition .
36,548
protected void onSuccess ( AsyncRequest < D , RQ > asyncRequest , ResponseStatus status ) { final WriteResponse response = WriteResponse . EMPTY ; for ( final AsyncRequest . Thunk thunk : asyncRequest . getThunks ( ) ) { WriteCallback callback = ( WriteCallback ) thunk . callback ; callback . onSuccess ( new WriteRespo...
Callback on sending the asyncRequest successfully
36,549
protected void onFailure ( AsyncRequest < D , RQ > asyncRequest , Throwable throwable ) { for ( AsyncRequest . Thunk thunk : asyncRequest . getThunks ( ) ) { thunk . callback . onFailure ( throwable ) ; } }
Callback on failing to send the asyncRequest
36,550
public synchronized boolean cancel ( ) { if ( this . taskFuture != null && this . taskFuture . cancel ( true ) ) { this . taskStateTracker . onTaskRunCompletion ( this ) ; return true ; } else { return false ; } }
return true if the task is successfully cancelled . This method is a copy of the method in parent class . We need this copy so TaskIFaceWrapper variables are not shared between this class and its parent class
36,551
public Schema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { if ( this . fieldRemover . isPresent ( ) ) { return this . fieldRemover . get ( ) . removeFields ( inputSchema ) ; } return inputSchema ; }
Remove the specified fields from inputSchema .
36,552
public Iterable < GenericRecord > convertRecordImpl ( Schema outputSchema , GenericRecord inputRecord , WorkUnitState workUnit ) throws DataConversionException { try { return new SingleRecordIterable < > ( AvroUtils . convertRecordSchema ( inputRecord , outputSchema ) ) ; } catch ( IOException e ) { throw new DataConve...
Convert the schema of inputRecord to outputSchema .
36,553
private Collection < String > getPages ( String startDate , String endDate , List < Dimension > dimensions , ApiDimensionFilter countryFilter , Queue < Pair < String , FilterOperator > > toProcess , int rowLimit ) { String country = GoogleWebmasterFilter . countryFilterToString ( countryFilter ) ; ConcurrentLinkedDeque...
Get all pages in an async mode .
36,554
private ArrayList < String > getUrlPartitions ( String prefix ) { ArrayList < String > expanded = new ArrayList < > ( ) ; for ( char c = 'a' ; c <= 'z' ; ++ c ) { expanded . add ( prefix + c ) ; } for ( int num = 0 ; num <= 9 ; ++ num ) { expanded . add ( prefix + num ) ; } expanded . add ( prefix + "-" ) ; expanded . ...
This doesn t cover all cases but more than 99 . 9% captured .
36,555
public void addListener ( final PathAlterationListener listener ) { if ( listener != null ) { this . listeners . put ( listener , new ExceptionCatchingPathAlterationListenerDecorator ( listener ) ) ; } }
Add a file system listener .
36,556
public void initialize ( ) throws IOException { rootEntry . refresh ( rootEntry . getPath ( ) ) ; final FileStatusEntry [ ] children = doListPathsEntry ( rootEntry . getPath ( ) , rootEntry ) ; rootEntry . setChildren ( children ) ; }
Initialize the observer .
36,557
public void checkAndNotify ( ) throws IOException { for ( final PathAlterationListener listener : listeners . values ( ) ) { listener . onStart ( this ) ; } final Path rootPath = rootEntry . getPath ( ) ; if ( fs . exists ( rootPath ) ) { checkAndNotify ( rootEntry , rootEntry . getChildren ( ) , listPaths ( rootPath )...
Check whether the file and its chlidren have been created modified or deleted .
36,558
private void checkAndNotify ( final FileStatusEntry parent , final FileStatusEntry [ ] previous , final Path [ ] currentPaths ) throws IOException { int c = 0 ; final FileStatusEntry [ ] current = currentPaths . length > 0 ? new FileStatusEntry [ currentPaths . length ] : FileStatusEntry . EMPTY_ENTRIES ; for ( final F...
Compare two file lists for files which have been created modified or deleted .
36,559
private FileStatusEntry createPathEntry ( final FileStatusEntry parent , final Path childPath ) throws IOException { final FileStatusEntry entry = parent . newChildInstance ( childPath ) ; entry . refresh ( childPath ) ; final FileStatusEntry [ ] children = doListPathsEntry ( childPath , entry ) ; entry . setChildren (...
Create a new FileStatusEntry for the specified file .
36,560
private FileStatusEntry [ ] doListPathsEntry ( Path path , FileStatusEntry entry ) throws IOException { final Path [ ] paths = listPaths ( path ) ; final FileStatusEntry [ ] children = paths . length > 0 ? new FileStatusEntry [ paths . length ] : FileStatusEntry . EMPTY_ENTRIES ; for ( int i = 0 ; i < paths . length ; ...
List the path in the format of FileStatusEntry array
36,561
private Path [ ] listPaths ( final Path path ) throws IOException { Path [ ] children = null ; ArrayList < Path > tmpChildrenPath = new ArrayList < > ( ) ; if ( fs . isDirectory ( path ) ) { FileStatus [ ] chiledrenFileStatus = pathFilter == null ? fs . listStatus ( path ) : fs . listStatus ( path , pathFilter ) ; for ...
List the contents of a directory denoted by Path
36,562
public FlowStatus getFlowStatus ( String flowName , String flowGroup , long flowExecutionId ) { FlowStatus flowStatus = null ; Iterator < JobStatus > jobStatusIterator = jobStatusRetriever . getJobStatusesForFlowExecution ( flowName , flowGroup , flowExecutionId ) ; if ( jobStatusIterator . hasNext ( ) ) { flowStatus =...
Get the flow status for a specific execution .
36,563
public boolean isFlowRunning ( String flowName , String flowGroup ) { List < FlowStatus > flowStatusList = getLatestFlowStatus ( flowName , flowGroup , 1 ) ; if ( flowStatusList == null || flowStatusList . isEmpty ( ) ) { return false ; } else { FlowStatus flowStatus = flowStatusList . get ( 0 ) ; Iterator < JobStatus ...
Return true if another instance of a flow is running . A flow is determined to be in the RUNNING state if any of the jobs in the flow are in the RUNNING state .
36,564
@ SuppressWarnings ( value = "unchecked" ) < T , K extends SharedResourceKey > SharedResourceFactoryResponse < T > getScopedFromCache ( final SharedResourceFactory < T , K , S > factory , final K key , final ScopeWrapper < S > scope , final SharedResourcesBrokerImpl < S > broker ) throws ExecutionException { RawJobBrok...
Get a scoped object from the cache .
36,565
private Iterator < JsonElement > getSoftDeletedRecords ( String schema , String entity , WorkUnit workUnit , List < Predicate > predicateList ) throws DataRecordException { return this . getRecordSet ( schema , entity , workUnit , predicateList ) ; }
Get soft deleted records using Rest Api
36,566
public boolean bulkApiLogin ( ) throws Exception { log . info ( "Authenticating salesforce bulk api" ) ; boolean success = false ; String hostName = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_HOST_NAME ) ; String apiVersion = this . workUnitState . getProp ( ConfigurationKeys . SOURCE_CONN_VERSION...
Login to salesforce
36,567
private BufferedReader getBulkBufferedReader ( int index ) throws AsyncApiException { return new BufferedReader ( new InputStreamReader ( this . bulkConnection . getQueryResultStream ( this . bulkJob . getId ( ) , this . bulkResultIdList . get ( index ) . getBatchId ( ) , this . bulkResultIdList . get ( index ) . getRe...
Get a buffered reader wrapping the query result stream for the result with the specified index
36,568
private void fetchResultBatchWithRetry ( RecordSetList < JsonElement > rs ) throws AsyncApiException , DataRecordException , IOException { boolean success = false ; int retryCount = 0 ; int recordCountBeforeFetch = this . bulkRecordCount ; do { try { if ( retryCount > 0 ) { reinitializeBufferedReader ( ) ; } fetchResul...
Fetch a result batch with retry for network errors
36,569
private RecordSet < JsonElement > getBulkData ( ) throws DataRecordException { log . debug ( "Processing bulk api batch..." ) ; RecordSetList < JsonElement > rs = new RecordSetList < > ( ) ; try { if ( this . bulkBufferedReader == null || ! this . bulkBufferedReader . ready ( ) ) { if ( this . bulkResultIdCount > 0 ) {...
Get data from the bulk api input stream
36,570
private BatchInfo waitForPkBatches ( BatchInfoList batchInfoList , int retryInterval ) throws InterruptedException , AsyncApiException { BatchInfo batchInfo = null ; BatchInfo [ ] batchInfos = batchInfoList . getBatchInfo ( ) ; for ( int i = 1 ; i < batchInfos . length ; i ++ ) { BatchInfo bi = batchInfos [ i ] ; bi = ...
Waits for the PK batches to complete . The wait will stop after all batches are complete or on the first failed batch
36,571
public static Map < String , Object > getConfigForBranch ( State taskState , int numBranches , int branch ) { String typePropertyName = ForkOperatorUtils . getPropertyNameForBranch ( ConfigurationKeys . WRITER_CODEC_TYPE , numBranches , branch ) ; String compressionType = taskState . getProp ( typePropertyName ) ; if (...
Retrieve configuration settings for a given branch .
36,572
public static String getCompressionType ( Map < String , Object > properties ) { return ( String ) properties . get ( COMPRESSION_TYPE_KEY ) ; }
Return compression type
36,573
public void close ( ) throws IOException { try { this . pool . returnObject ( this . object ) ; } catch ( Exception exc ) { throw new IOException ( exc ) ; } finally { this . returned = true ; } }
Return the borrowed object to the pool .
36,574
public Schema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { LOG . info ( "Converting schema " + inputSchema ) ; String fieldsStr = workUnit . getProp ( ConfigurationKeys . CONVERTER_AVRO_FIELD_PICK_FIELDS ) ; Preconditions . checkNotNull ( fieldsStr , ConfigurationKeys...
Convert the schema to contain only specified field . This will reuse AvroSchemaFieldRemover by listing fields not specified and remove it from the schema 1 . Retrieve list of fields from property 2 . Traverse schema and get list of fields to be removed 3 . While traversing also confirm specified fields from property al...
36,575
private static Schema createSchema ( Schema schema , String fieldsStr ) { List < String > fields = SPLITTER_ON_COMMA . splitToList ( fieldsStr ) ; TrieNode root = buildTrie ( fields ) ; return createSchemaHelper ( schema , root ) ; }
Creates Schema containing only specified fields .
36,576
private static Schema getActualRecord ( Schema inputSchema ) { if ( Type . RECORD . equals ( inputSchema . getType ( ) ) ) { return inputSchema ; } Preconditions . checkArgument ( Type . UNION . equals ( inputSchema . getType ( ) ) , "Nested schema is only support with either record or union type of null with record" )...
For the schema that is a UNION type with NULL and Record type it provides Records type .
36,577
public static String getSerializedWithNewPackage ( String serialized ) { serialized = serialized . replace ( "\"gobblin.data.management." , "\"org.apache.gobblin.data.management." ) ; log . debug ( "Serialized updated copy entity: " + serialized ) ; return serialized ; }
Converts package name in serialized string to new name . This is temporary change and should get removed after all the states are switched from old to new package name .
36,578
public static Properties mergeTemplateWithUserCustomizedFile ( Properties template , Properties userCustomized ) { Properties cleanedTemplate = new Properties ( ) ; cleanedTemplate . putAll ( template ) ; if ( cleanedTemplate . containsKey ( ConfigurationKeys . REQUIRED_ATRRIBUTES_LIST ) ) { cleanedTemplate . remove ( ...
create a complete property file based on the given template
36,579
protected void startCancellationExecutor ( ) { this . cancellationExecutor . execute ( new Runnable ( ) { public void run ( ) { synchronized ( AbstractJobLauncher . this . cancellationRequest ) { try { while ( ! AbstractJobLauncher . this . cancellationRequested ) { AbstractJobLauncher . this . cancellationRequest . wa...
Start the scheduled executor for executing job cancellation .
36,580
private boolean tryLockJob ( Properties properties ) { try { if ( Boolean . valueOf ( properties . getProperty ( ConfigurationKeys . JOB_LOCK_ENABLED_KEY , Boolean . TRUE . toString ( ) ) ) ) { this . jobLockOptional = Optional . of ( getJobLock ( properties , new JobLockEventListener ( ) { public void onLost ( ) { exe...
Try acquiring the job lock and return whether the lock is successfully locked .
36,581
private void unlockJob ( ) { if ( this . jobLockOptional . isPresent ( ) ) { try { this . jobLockOptional . get ( ) . unlock ( ) ; } catch ( JobLockException ioe ) { LOG . error ( String . format ( "Failed to unlock for job %s: %s" , this . jobContext . getJobId ( ) , ioe ) , ioe ) ; } finally { try { this . jobLockOpt...
Unlock a completed or failed job .
36,582
private void cleanLeftoverStagingData ( WorkUnitStream workUnits , JobState jobState ) throws JobException { if ( jobState . getPropAsBoolean ( ConfigurationKeys . CLEANUP_STAGING_DATA_BY_INITIALIZER , false ) ) { return ; } try { if ( ! canCleanStagingData ( jobState ) ) { LOG . error ( "Job " + jobState . getJobName ...
Clean up the left-over staging data, possibly from a previous run of the job that may have failed without cleaning up its staging data.
36,583
private void cleanupStagingData ( JobState jobState ) throws JobException { if ( jobState . getPropAsBoolean ( ConfigurationKeys . CLEANUP_STAGING_DATA_BY_INITIALIZER , false ) ) { return ; } try { if ( ! canCleanStagingData ( jobState ) ) { LOG . error ( "Job " + jobState . getJobName ( ) + " has unfinished commit seq...
Cleanup the job s task staging data . This is not doing anything in case job succeeds and data is successfully committed because the staging data has already been moved to the job output directory . But in case the job fails and data is not committed we want the staging data to be cleaned up .
36,584
/**
 * Decides whether task staging data may be deleted for the given job state.
 *
 * <p>Staging data must be preserved only when the job runs with exactly-once delivery
 * semantics and an unfinished commit sequence for this job still exists in the commit
 * sequence store; in every other case it is safe to clean.
 *
 * @param jobState state of the job whose staging data is being considered for cleanup
 * @return true if the staging data can safely be removed
 * @throws IOException if the commit sequence store cannot be queried
 */
private boolean canCleanStagingData(JobState jobState) throws IOException {
  if (this.jobContext.getSemantics() != DeliverySemantics.EXACTLY_ONCE) {
    // Non-exactly-once jobs never leave commit sequences behind, so cleanup is always safe.
    return true;
  }
  return !this.jobContext.getCommitSequenceStore().get().exists(jobState.getJobName());
}
Staging data cannot be cleaned if exactly once semantics is used and the job has unfinished commit sequences .
36,585
private void doRunningStateChange ( RunningState newState ) { RunningState oldState = null ; JobExecutionStateListener stateListener = null ; this . changeLock . lock ( ) ; try { if ( null == this . runningState ) { Preconditions . checkState ( RunningState . PENDING == newState ) ; } else { Preconditions . checkState ...
This must be called only when holding changeLock
36,586
protected void setStagingFileGroup ( ) throws IOException { Preconditions . checkArgument ( this . fs . exists ( this . stagingFile ) , String . format ( "Staging output file %s does not exist" , this . stagingFile ) ) ; if ( this . group . isPresent ( ) ) { HadoopUtils . setGroup ( this . fs , this . stagingFile , thi...
Set the group name of the staging output file .
36,587
public static WorkUnit queryResultMaterializationWorkUnit ( String query , HiveConverterUtils . StorageFormat storageFormat , StageableTableMetadata destinationTable ) { WorkUnit workUnit = new WorkUnit ( ) ; workUnit . setProp ( MATERIALIZER_MODE_KEY , MaterializerMode . QUERY_RESULT_MATERIALIZATION . name ( ) ) ; wor...
Create a work unit to materialize a query to a target table using a staging table in between .
36,588
public void filterSkippedTaskStates ( ) { List < TaskState > skippedTaskStates = new ArrayList < > ( ) ; for ( TaskState taskState : this . taskStates . values ( ) ) { if ( taskState . getWorkingState ( ) == WorkUnitState . WorkingState . SKIPPED ) { skippedTaskStates . add ( taskState ) ; } } for ( TaskState taskState...
Filter the task states corresponding to the skipped work units and add it to the skippedTaskStates
36,589
/**
 * Counts how many of this job's tasks have completed.
 *
 * @return the number of task states reporting {@code isCompleted()}
 */
public int getCompletedTasks() {
  return (int) this.taskStates.values().stream()
      .filter(TaskState::isCompleted)
      .count();
}
Get the number of completed tasks .
36,590
protected void writeStateSummary ( JsonWriter jsonWriter ) throws IOException { jsonWriter . name ( "job name" ) . value ( this . getJobName ( ) ) . name ( "job id" ) . value ( this . getJobId ( ) ) . name ( "job state" ) . value ( this . getState ( ) . name ( ) ) . name ( "start time" ) . value ( this . getStartTime (...
Write a summary to the JSON document.
36,591
/**
 * Creates a new application ID of the form {@code app_<appName>_<currentTimeMillis>}.
 *
 * @param appName logical application name to embed in the ID
 * @return the generated application ID
 */
public static String newAppId(String appName) {
  // Single format call produces the same "app_" + name + "_" + timestamp layout as before.
  return String.format("app_%s_%d", appName, System.currentTimeMillis());
}
Create a new app ID .
36,592
private boolean taskSuccessfulInPriorAttempt ( String taskId ) { if ( this . taskStateStoreOptional . isPresent ( ) ) { StateStore < TaskState > taskStateStore = this . taskStateStoreOptional . get ( ) ; try { if ( taskStateStore . exists ( jobId , taskId + TASK_STATE_STORE_SUCCESS_MARKER_SUFFIX ) ) { log . info ( "Ski...
Determine if the task executed successfully in a prior attempt by checking the task state store for the success marker.
36,593
public static GobblinMultiTaskAttempt runWorkUnits ( JobContext jobContext , Iterator < WorkUnit > workUnits , TaskStateTracker taskStateTracker , TaskExecutor taskExecutor , CommitPolicy multiTaskAttemptCommitPolicy ) throws IOException , InterruptedException { GobblinMultiTaskAttempt multiTaskAttempt = new GobblinMul...
FIXME this method is provided for backwards compatibility in the LocalJobLauncher since it does not access the task state store . This should be addressed as all task executions should be updating the task state .
36,594
void storeJobExecutionInfo ( ) { if ( this . jobHistoryStoreOptional . isPresent ( ) ) { try { this . logger . info ( "Writing job execution information to the job history store" ) ; this . jobHistoryStoreOptional . get ( ) . put ( this . jobState . toJobExecutionInfo ( ) ) ; } catch ( IOException ioe ) { this . logger...
Store job execution information into the job history store .
36,595
void commit ( final boolean isJobCancelled ) throws IOException { this . datasetStatesByUrns = Optional . of ( computeDatasetStatesByUrns ( ) ) ; final boolean shouldCommitDataInJob = shouldCommitDataInJob ( this . jobState ) ; final DeliverySemantics deliverySemantics = DeliverySemantics . parse ( this . jobState ) ; ...
Commit the job based on whether the job is cancelled .
36,596
protected Callable < Void > createSafeDatasetCommit ( boolean shouldCommitDataInJob , boolean isJobCancelled , DeliverySemantics deliverySemantics , String datasetUrn , JobState . DatasetState datasetState , boolean isMultithreaded , JobContext jobContext ) { return new SafeDatasetCommit ( shouldCommitDataInJob , isJob...
The only reason for this method is so that we can test the parallelization of commits. DO NOT OVERRIDE.
36,597
private static void validateInput ( State state ) { int branches = state . getPropAsInt ( ConfigurationKeys . FORK_BRANCHES_KEY , 1 ) ; Set < String > publishTables = Sets . newHashSet ( ) ; for ( int branchId = 0 ; branchId < branches ; branchId ++ ) { String publishTable = Preconditions . checkNotNull ( getProp ( sta...
1 . User should not define same destination table across different branches . 2 . User should not define same staging table across different branches . 3 . If commit policy is not full Gobblin will try to write into final table even there s a failure . This will let Gobblin to write in task level . However publish data...
36,598
/**
 * Checks whether {@code superUserName} can proxy as {@code userNameToProxyAs} using the
 * keytab at {@code superUserKeytabLocation}.
 *
 * @param userNameToProxyAs the user to impersonate
 * @param superUserName the super user attempting the impersonation
 * @param superUserKeytabLocation path to the super user's keytab file
 * @return true if the login-and-proxy attempt succeeds; false if it fails with an IOException
 */
public static boolean canProxyAs(String userNameToProxyAs, String superUserName,
    Path superUserKeytabLocation) {
  try {
    loginAndProxyAsUser(userNameToProxyAs, superUserName, superUserKeytabLocation);
    return true;
  } catch (IOException ioe) {
    // A failed login/proxy attempt simply means proxying is not permitted.
    return false;
  }
}
Returns true if superUserName can proxy as userNameToProxyAs using the specified superUserKeytabLocation false otherwise .
36,599
protected JobSpec jobSpecGenerator ( FlowSpec flowSpec ) { JobSpec jobSpec ; JobSpec . Builder jobSpecBuilder = JobSpec . builder ( jobSpecURIGenerator ( flowSpec ) ) . withConfig ( flowSpec . getConfig ( ) ) . withDescription ( flowSpec . getDescription ( ) ) . withVersion ( flowSpec . getVersion ( ) ) ; if ( flowSpec...
Naive implementation of generating a jobSpec, which fetches the first available template, in an exemplified single-hop FlowCompiler implementation.