| idx (int64, 0–41.2k) | question (string, lengths 74–4.21k) | target (string, lengths 5–888) |
|---|---|---|
36,200 | private Histogram getHistogramByDayBucketing ( SalesforceConnector connector , String entity , String watermarkColumn , Partition partition ) { Histogram histogram = new Histogram ( ) ; Calendar calendar = new GregorianCalendar ( ) ; Date startDate = Utils . toDate ( partition . getLowWatermark ( ) , Partitioner . WATE... | Get a histogram with day granularity buckets . |
36,201 | private Histogram getHistogram ( String entity , String watermarkColumn , SourceState state , Partition partition ) { SalesforceConnector connector = getConnector ( state ) ; try { if ( ! connector . connect ( ) ) { throw new RuntimeException ( "Failed to connect." ) ; } } catch ( RestApiConnectionException e ) { throw... | Generate the histogram |
36,202 | private Retryer < Void > buildRetryer ( State state ) { RetryerBuilder < Void > builder = null ; if ( writer instanceof Retriable ) { builder = ( ( Retriable ) writer ) . getRetryerBuilder ( ) ; } else { builder = createRetryBuilder ( state ) ; } if ( GobblinMetrics . isEnabled ( state ) ) { final Optional < Meter > re... | Build the Retryer: if the writer implements Retriable, it will use the writer's RetryerBuilder; otherwise it will use the default retry builder. |
36,203 | public static List < Partition > getPartitions ( IMetaStoreClient client , Table table , Optional < String > filter ) throws IOException { return getPartitions ( client , table , filter , Optional . < HivePartitionExtendedFilter > absent ( ) ) ; } | For backward compatibility when PathFilter is injected as a parameter . |
36,204 | public static Set < Path > getPaths ( InputFormat < ? , ? > inputFormat , Path location ) throws IOException { JobConf jobConf = new JobConf ( getHadoopConfiguration ( ) ) ; Set < Path > paths = Sets . newHashSet ( ) ; FileInputFormat . addInputPaths ( jobConf , location . toString ( ) ) ; InputSplit [ ] splits = input... | Get paths from a Hive location using the provided input format . |
36,205 | public static String getPathForBranch ( State state , String path , int numBranches , int branchId ) { Preconditions . checkNotNull ( state ) ; Preconditions . checkNotNull ( path ) ; Preconditions . checkArgument ( numBranches >= 0 , "The number of branches is expected to be non-negative" ) ; Preconditions . checkArgu... | Get a new path with the given branch name as a sub directory . |
36,206 | protected static long getCreateTime ( Table table ) { return TimeUnit . MILLISECONDS . convert ( table . getTTable ( ) . getCreateTime ( ) , TimeUnit . SECONDS ) ; } | Convert createTime from seconds to milliseconds |
36,207 | private void silenceHiveLoggers ( ) { List < String > loggers = ImmutableList . of ( "org.apache.hadoop.hive" , "org.apache.hive" , "hive.ql.parse" ) ; for ( String name : loggers ) { Logger logger = Logger . getLogger ( name ) ; if ( logger != null ) { logger . setLevel ( Level . WARN ) ; } } } | Hive logging is too verbose at INFO level . Currently hive does not have a way to set log level . This is a workaround to set log level to WARN for hive loggers only |
36,208 | private void commitDataset ( Collection < TaskState > taskStates , DataPublisher publisher ) { try { publisher . publish ( taskStates ) ; } catch ( Throwable t ) { log . error ( "Failed to commit dataset" , t ) ; setTaskFailureException ( taskStates , t ) ; } } | Commit the output data of a dataset . |
36,209 | private boolean canCommitDataset ( JobState . DatasetState datasetState ) { return this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_ON_PARTIAL_SUCCESS || this . jobContext . getJobCommitPolicy ( ) == JobCommitPolicy . COMMIT_SUCCESSFUL_TASKS || ( this . jobContext . getJobCommitPolicy ( ) == JobCo... | Check if it is OK to commit the output data of a dataset . |
36,210 | private void persistDatasetState ( String datasetUrn , JobState . DatasetState datasetState ) throws IOException { log . info ( "Persisting dataset state for dataset " + datasetUrn ) ; this . jobContext . getDatasetStateStore ( ) . persistDatasetState ( datasetUrn , datasetState ) ; } | Persist dataset state of a given dataset identified by the dataset URN . |
36,211 | private Path getOrGenerateSchemaFile ( Schema schema ) throws IOException { Preconditions . checkNotNull ( schema , "Avro Schema should not be null" ) ; String hashedSchema = Hashing . sha256 ( ) . hashString ( schema . toString ( ) , StandardCharsets . UTF_8 ) . toString ( ) ; if ( ! this . schemaPaths . containsKey (... | If a url for the schema already exists, return that url; if not, create a new temporary schema file and return its url. |
36,212 | public static void writeStringAsText ( DataOutput stream , String str ) throws IOException { byte [ ] utf8Encoded = str . getBytes ( StandardCharsets . UTF_8 ) ; writeVLong ( stream , utf8Encoded . length ) ; stream . write ( utf8Encoded ) ; } | Serialize a String using the same logic as a Hadoop Text object |
36,213 | public static String readTextAsString ( DataInput in ) throws IOException { int bufLen = ( int ) readVLong ( in ) ; byte [ ] buf = new byte [ bufLen ] ; in . readFully ( buf ) ; return new String ( buf , StandardCharsets . UTF_8 ) ; } | Deserialize a Hadoop Text object into a String |
36,214 | private static void writeVLong ( DataOutput stream , long i ) throws IOException { if ( i >= - 112 && i <= 127 ) { stream . writeByte ( ( byte ) i ) ; return ; } int len = - 112 ; if ( i < 0 ) { i ^= - 1L ; len = - 120 ; } long tmp = i ; while ( tmp != 0 ) { tmp = tmp >> 8 ; len -- ; } stream . writeByte ( ( byte ) len... | From org.apache.hadoop.io.WritableUtils; see the VLong sketch after the table. |
36,215 | private static long readVLong ( DataInput stream ) throws IOException { byte firstByte = stream . readByte ( ) ; int len = decodeVIntSize ( firstByte ) ; if ( len == 1 ) { return firstByte ; } long i = 0 ; for ( int idx = 0 ; idx < len - 1 ; idx ++ ) { byte b = stream . readByte ( ) ; i = i << 8 ; i = i | ( b & 0xFF ) ... | Reads a zero - compressed encoded long from input stream and returns it . |
36,216 | private boolean canRun ( String flowName , String flowGroup , boolean allowConcurrentExecution ) { if ( allowConcurrentExecution ) { return true ; } else { return ! flowStatusGenerator . isFlowRunning ( flowName , flowGroup ) ; } } | Check if the flow instance is allowed to run . |
36,217 | private synchronized void getAllPreviousOffsetState ( SourceState state ) { if ( this . doneGettingAllPreviousOffsets ) { return ; } this . previousOffsets . clear ( ) ; this . previousLowWatermarks . clear ( ) ; this . previousExpectedHighWatermarks . clear ( ) ; this . previousOffsetFetchEpochTimes . clear ( ) ; this... | this.previousOffsetFetchEpochTimes needs to be initialized only once |
36,218 | public static String getCoalesceColumnNames ( String columnOrColumnList ) { if ( Strings . isNullOrEmpty ( columnOrColumnList ) ) { return null ; } if ( columnOrColumnList . contains ( "," ) ) { return "COALESCE(" + columnOrColumnList + ")" ; } return columnOrColumnList ; } | Get coalesce of columns if there are multiple comma - separated columns |
36,219 | public static String printTiming ( long start , long end ) { long totalMillis = end - start ; long mins = TimeUnit . MILLISECONDS . toMinutes ( totalMillis ) ; long secs = TimeUnit . MILLISECONDS . toSeconds ( totalMillis ) - TimeUnit . MINUTES . toSeconds ( mins ) ; long millis = TimeUnit . MILLISECONDS . toMillis ( t... | Print the time difference in minutes, seconds, and milliseconds; see the timing sketch after the table. |
36,220 | public static List < String > getColumnListFromQuery ( String query ) { if ( Strings . isNullOrEmpty ( query ) ) { return null ; } String queryLowerCase = query . toLowerCase ( ) ; int startIndex = queryLowerCase . indexOf ( "select " ) + 7 ; int endIndex = queryLowerCase . indexOf ( " from " ) ; if ( startIndex < 0 ||... | Get the column list from the user-provided query to build a schema with the respective columns; see the query-helper sketch after the table. |
36,221 | public static String escapeSpecialCharacters ( String columnName , String escapeChars , String character ) { if ( Strings . isNullOrEmpty ( columnName ) ) { return null ; } if ( StringUtils . isEmpty ( escapeChars ) ) { return columnName ; } List < String > specialChars = Arrays . asList ( escapeChars . split ( "," ) )... | Escape special characters in a column or table name. |
36,222 | public static long getLongWithCurrentDate ( String value , String timezone ) { if ( Strings . isNullOrEmpty ( value ) ) { return 0 ; } DateTime time = getCurrentTime ( timezone ) ; DateTimeFormatter dtFormatter = DateTimeFormat . forPattern ( CURRENT_DATE_FORMAT ) . withZone ( time . getZone ( ) ) ; if ( value . toUppe... | Helper method for getting a value containing CURRENTDAY - 1 or CURRENTHOUR - 1 in the form yyyyMMddHHmmss |
36,223 | public static String dateTimeToString ( DateTime input , String format , String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTimeFormatter outputDtFormat = DateTimeFormat . forPattern ( format ) . w... | Convert joda time to a string in the given format |
36,224 | public static DateTime getCurrentTime ( String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTime currentTime = new DateTime ( dateTimeZone ) ; return currentTime ; } | Get current time - joda |
36,225 | public static DateTime toDateTime ( String input , String format , String timezone ) { String tz = StringUtils . defaultString ( timezone , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; DateTimeZone dateTimeZone = getTimeZone ( tz ) ; DateTimeFormatter inputDtFormat = DateTimeFormat . forPattern ( format ) . withZone... | Convert timestamp in a string format to joda time |
36,226 | public static DateTime toDateTime ( long input , String format , String timezone ) { return toDateTime ( Long . toString ( input ) , format , timezone ) ; } | Convert timestamp in a long format to joda time |
36,227 | private static DateTimeZone getTimeZone ( String id ) { DateTimeZone zone ; try { zone = DateTimeZone . forID ( id ) ; } catch ( IllegalArgumentException e ) { throw new IllegalArgumentException ( "TimeZone " + id + " not recognized" ) ; } return zone ; } | Get time zone of time zone id |
36,228 | public synchronized void scheduleJob ( Properties jobProps , JobListener jobListener ) throws JobException { Map < String , Object > additionalJobDataMap = Maps . newHashMap ( ) ; additionalJobDataMap . put ( ServiceConfigKeys . GOBBLIN_SERVICE_FLOWSPEC , this . scheduledFlowSpecs . get ( jobProps . getProperty ( Confi... | Synchronize the job scheduling because the same flowSpec can be scheduled by different threads . |
36,229 | public static ArrayList < String > groupToPages ( Triple < String , GoogleWebmasterFilter . FilterOperator , UrlTrieNode > group ) { ArrayList < String > ret = new ArrayList < > ( ) ; if ( group . getMiddle ( ) . equals ( GoogleWebmasterFilter . FilterOperator . EQUALS ) ) { if ( group . getRight ( ) . isExist ( ) ) { ... | Get the detailed pages under this group |
36,230 | public static void deletePathByRegex ( FileSystem fs , final Path path , final String regex ) throws IOException { FileStatus [ ] statusList = fs . listStatus ( path , path1 -> path1 . getName ( ) . matches ( regex ) ) ; for ( final FileStatus oldJobFile : statusList ) { HadoopUtils . deletePath ( fs , oldJobFile . get... | Delete files according to the regular expression provided |
36,231 | public static void moveToTrash ( FileSystem fs , Path path ) throws IOException { Trash trash = new Trash ( fs , new Configuration ( ) ) ; trash . moveToTrash ( path ) ; } | Moves the object to the filesystem trash according to the file system policy . |
36,232 | public static boolean unsafeRenameIfNotExists ( FileSystem fs , Path from , Path to ) throws IOException { if ( ! fs . exists ( to ) ) { if ( ! fs . exists ( to . getParent ( ) ) ) { fs . mkdirs ( to . getParent ( ) ) ; } if ( ! renamePathHandleLocalFSRace ( fs , from , to ) ) { if ( ! fs . exists ( to ) ) { throw new ... | Renames `from` to `to` if `to` doesn't exist, in a non-thread-safe way; see the rename sketch after the table. |
36,233 | public static void setGroup ( FileSystem fs , Path path , String group ) throws IOException { fs . setOwner ( path , fs . getFileStatus ( path ) . getOwner ( ) , group ) ; } | Set the group associated with a given path . |
36,234 | public static void setPermissions ( Path location , Optional < String > owner , Optional < String > group , FileSystem fs , FsPermission permission ) { try { if ( ! owner . isPresent ( ) ) { return ; } if ( ! group . isPresent ( ) ) { return ; } fs . setOwner ( location , owner . get ( ) , group . get ( ) ) ; fs . setP... | Try to set owner and permissions for the path . Will not throw exception . |
36,235 | public List < String > generateQueries ( ) { ensureParentOfStagingPathExists ( ) ; List < String > hiveQueries = Lists . newArrayList ( ) ; hiveQueries . add ( "SET hive.exec.dynamic.partition.mode=nonstrict" ) ; Preconditions . checkNotNull ( this . workUnit , "Workunit must not be null" ) ; EventWorkunitUtils . setBe... | Returns hive queries to be run as a part of a hive task . This does not include publish queries . |
36,236 | public double setWorkUnitEstSizes ( Map < String , List < WorkUnit > > workUnitsByTopic ) { double totalEstDataSize = 0 ; for ( List < WorkUnit > workUnitsForTopic : workUnitsByTopic . values ( ) ) { for ( WorkUnit workUnit : workUnitsForTopic ) { setWorkUnitEstSize ( workUnit ) ; totalEstDataSize += getWorkUnitEstSize... | Calculate the total size of the workUnits and set the estimated size for each workUnit |
36,237 | public FlowStatus getFlowStatus ( FlowStatusId flowStatusId ) throws RemoteInvocationException { LOG . debug ( "getFlowConfig with groupName " + flowStatusId . getFlowGroup ( ) + " flowName " + flowStatusId . getFlowName ( ) ) ; GetRequest < FlowStatus > getRequest = _flowstatusesRequestBuilders . get ( ) . id ( new Co... | Get a flow status |
36,238 | private void submitJobToHelix ( JobConfig . Builder jobConfigBuilder ) throws Exception { HelixUtils . submitJobToWorkFlow ( jobConfigBuilder , this . helixWorkFlowName , this . jobContext . getJobId ( ) , this . helixTaskDriver , this . helixManager , this . workFlowExpiryTimeSeconds ) ; } | Submit a job to run . |
36,239 | private static List < ? extends Tag < ? > > addAdditionalMetadataTags ( Properties jobProps , List < ? extends Tag < ? > > inputTags ) { List < Tag < ? > > metadataTags = Lists . newArrayList ( inputTags ) ; String jobId ; if ( jobProps . containsKey ( ConfigurationKeys . JOB_ID_KEY ) ) { jobId = jobProps . getProperty... | Inject some additional properties. |
36,240 | public static Map < String , String > getClusterNameTags ( Configuration conf ) { ImmutableMap . Builder < String , String > tagMap = ImmutableMap . builder ( ) ; String clusterIdentifierTag = ClustersNames . getInstance ( ) . getClusterName ( conf ) ; if ( ! Strings . isNullOrEmpty ( clusterIdentifierTag ) ) { tagMap ... | Gets all useful Hadoop cluster metrics . |
36,241 | public static FlowSpec . Builder builder ( URI catalogURI , Properties flowProps ) { String name = flowProps . getProperty ( ConfigurationKeys . FLOW_NAME_KEY ) ; String group = flowProps . getProperty ( ConfigurationKeys . FLOW_GROUP_KEY , "default" ) ; try { URI flowURI = new URI ( catalogURI . getScheme ( ) , catalo... | Creates a builder for the FlowSpec based on values in a flow properties config . |
36,242 | protected boolean folderWithinAllowedPeriod ( Path inputFolder , DateTime folderTime ) { DateTime currentTime = new DateTime ( this . timeZone ) ; PeriodFormatter periodFormatter = getPeriodFormatter ( ) ; DateTime earliestAllowedFolderTime = getEarliestAllowedFolderTime ( currentTime , periodFormatter ) ; DateTime lat... | Return true iff the input folder time is between compaction.timebased.min.time.ago and compaction.timebased.max.time.ago; see the time-window sketch after the table. |
36,243 | public Result verify ( FileSystemDataset dataset ) { Map < String , Double > thresholdMap = RecompactionConditionBasedOnRatio . getDatasetRegexAndRecompactThreshold ( state . getProp ( MRCompactor . COMPACTION_LATEDATA_THRESHOLD_FOR_RECOMPACT_PER_DATASET , StringUtils . EMPTY ) ) ; CompactionPathParser . CompactionPars... | There are two record counts being compared here: 1) the new record count in the input folder, and 2) the record count we compacted previously in the last run. Calculate the difference of the two numbers and compare it with a predefined threshold. |
36,244 | protected Collection < FileSystemDatasetVersion > listQualifiedRawFileSystemDatasetVersions ( Collection < FileSystemDatasetVersion > allVersions ) { return Lists . newArrayList ( Collections2 . filter ( allVersions , new Predicate < FileSystemDatasetVersion > ( ) { public boolean apply ( FileSystemDatasetVersion versi... | A raw dataset version is qualified to be deleted iff the corresponding refined paths exist and the latest mod time of all files in the raw dataset is earlier than the latest mod time of all files in the refined paths. |
36,245 | public JdbcEntrySchema convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { LOG . info ( "Converting schema " + inputSchema ) ; Preconditions . checkArgument ( Type . RECORD . equals ( inputSchema . getType ( ) ) , "%s is expected for the first level element in Avro schema %s... | Converts Avro schema to JdbcEntrySchema . |
36,246 | private String tryConvertAvroColNameToJdbcColName ( String avroColName ) { if ( ! avroToJdbcColPairs . isPresent ( ) ) { String converted = avroColName . replaceAll ( AVRO_NESTED_COLUMN_DELIMITER_REGEX_COMPATIBLE , JDBC_FLATTENED_COLUMN_DELIMITER ) ; jdbcToAvroColPairs . put ( converted , avroColName ) ; return convert... | Convert Avro column name to JDBC column name . If name mapping is defined follow it . Otherwise just return avro column name while replacing nested column delimiter dot to underscore . This method also updates mapping from JDBC column name to Avro column name for reverse look up . |
36,247 | public RecordEnvelope < D > readRecordEnvelopeImpl ( ) throws DataRecordException , IOException { if ( ! _isStarted . get ( ) ) { throw new IOException ( "Streaming extractor has not been started." ) ; } while ( ( _records == null ) || ( ! _records . hasNext ( ) ) ) { synchronized ( _consumer ) { if ( _close . get ( ) ... | Return the next record when available . Will never time out since this is a streaming source . |
36,248 | public void onFileDelete ( Path rawPath ) { URI jobSpecUri = this . converter . computeURI ( rawPath ) ; listeners . onDeleteJob ( jobSpecUri , null ) ; } | For an already-deleted job configuration file the only identifier is its path; it doesn't make sense to call loadJobConfig here. |
36,249 | public Future < ? > scheduleJobImmediately ( Properties jobProps , JobListener jobListener , JobLauncher jobLauncher ) { Callable < Void > callable = new Callable < Void > ( ) { public Void call ( ) throws JobException { try { runJob ( jobProps , jobListener , jobLauncher ) ; } catch ( JobException je ) { LOG . error (... | Schedule a job immediately . |
36,250 | public void unscheduleJob ( String jobName ) throws JobException { if ( this . scheduledJobs . containsKey ( jobName ) ) { try { this . scheduler . getScheduler ( ) . deleteJob ( this . scheduledJobs . remove ( jobName ) ) ; } catch ( SchedulerException se ) { LOG . error ( "Failed to unschedule and delete job " + jobN... | Unschedule and delete a job . |
36,251 | private void scheduleGeneralConfiguredJobs ( ) throws ConfigurationException , JobException , IOException { LOG . info ( "Scheduling configured jobs" ) ; for ( Properties jobProps : loadGeneralJobConfigs ( ) ) { if ( ! jobProps . containsKey ( ConfigurationKeys . JOB_SCHEDULE_KEY ) ) { jobProps . setProperty ( Configur... | Schedule Gobblin jobs in general position |
36,252 | private void startGeneralJobConfigFileMonitor ( ) throws Exception { SchedulerUtils . addPathAlterationObserver ( this . pathAlterationDetector , this . listener , jobConfigFileDirPath ) ; this . pathAlterationDetector . start ( ) ; this . closer . register ( new Closeable ( ) { public void close ( ) throws IOException... | Start the job configuration file monitor using generic file system API . |
36,253 | public Collection < DbAndTable > getTables ( ) throws IOException { List < DbAndTable > tables = Lists . newArrayList ( ) ; try ( AutoReturnableObject < IMetaStoreClient > client = this . clientPool . getClient ( ) ) { Iterable < String > databases = Iterables . filter ( client . get ( ) . getAllDatabases ( ) , new Pre... | Get all tables in db with given table pattern . |
36,254 | protected static Set < URI > getValidDatasetURIsHelper ( Collection < URI > allDatasetURIs , Set < URI > disabledURISet , Path datasetCommonRoot ) { if ( allDatasetURIs == null || allDatasetURIs . isEmpty ( ) ) { return ImmutableSet . of ( ) ; } Comparator < URI > pathLengthComparator = new Comparator < URI > ( ) { pub... | Extended signature for testing convenience . |
36,255 | public long getRecordCount ( Path filepath ) { String filename = filepath . getName ( ) ; Preconditions . checkArgument ( filename . startsWith ( M_OUTPUT_FILE_PREFIX ) || filename . startsWith ( MR_OUTPUT_FILE_PREFIX ) , String . format ( "%s is not a supported filename, which should start with %s, or %s." , filename ... | Get the record count through filename . |
36,256 | private void findPath ( Map < Spec , SpecExecutor > specExecutorInstanceMap , Spec spec ) { inMemoryWeightGraphGenerator ( ) ; FlowSpec flowSpec = ( FlowSpec ) spec ; if ( optionalUserSpecifiedPath . isPresent ( ) ) { log . info ( "Starting to evaluate user's specified path ... " ) ; if ( userSpecifiedPathVerificator (... | that a topologySpec not being reflected in findPath . |
36,257 | protected void populateEdgeTemplateMap ( ) { if ( templateCatalog . isPresent ( ) ) { for ( FlowEdge flowEdge : this . weightedGraph . edgeSet ( ) ) { edgeTemplateMap . put ( flowEdge . getEdgeIdentity ( ) , templateCatalog . get ( ) . getAllTemplates ( ) . stream ( ) . map ( jobTemplate -> jobTemplate . getUri ( ) ) .... | As the base implementation, all templates will be considered for each edge. |
36,258 | private boolean userSpecifiedPathVerificator ( Map < Spec , SpecExecutor > specExecutorInstanceMap , FlowSpec flowSpec ) { Map < Spec , SpecExecutor > tmpSpecExecutorInstanceMap = new HashMap < > ( ) ; List < String > userSpecfiedPath = Arrays . asList ( optionalUserSpecifiedPath . get ( ) . split ( "," ) ) ; for ( int... | Verify the user-specified path; returns false if any specified hop cannot be satisfied, else returns true. |
36,259 | private void weightGraphGenerateHelper ( TopologySpec topologySpec ) { try { Map < ServiceNode , ServiceNode > capabilities = topologySpec . getSpecExecutor ( ) . getCapabilities ( ) . get ( ) ; for ( Map . Entry < ServiceNode , ServiceNode > capability : capabilities . entrySet ( ) ) { BaseServiceNodeImpl sourceNode =... | Helper function for transforming the TopologySpecMap into a weightedDirectedGraph. |
36,260 | private JobSpec convertHopToJobSpec ( ServiceNode sourceNode , ServiceNode targetNode , FlowSpec flowSpec ) { FlowEdge flowEdge = weightedGraph . getAllEdges ( sourceNode , targetNode ) . iterator ( ) . next ( ) ; URI templateURI = getTemplateURI ( sourceNode , targetNode , flowSpec , flowEdge ) ; return buildJobSpec (... | A naive implementation of resolving templates in each JobSpec among Multi - hop FlowSpec . Handle the case when edge is not specified . Always select the first available template . |
36,261 | public URI jobSpecURIGenerator ( Object ... objects ) { FlowSpec flowSpec = ( FlowSpec ) objects [ 0 ] ; ServiceNode sourceNode = ( ServiceNode ) objects [ 1 ] ; ServiceNode targetNode = ( ServiceNode ) objects [ 2 ] ; try { return new URI ( JobSpec . Builder . DEFAULT_JOB_CATALOG_SCHEME , flowSpec . getUri ( ) . getAu... | A naive implementation of generating a jobSpec's URI within a multi-hop logical Flow. |
36,262 | private PathFilter getFileFilter ( ) { final String extension = ( this . expectedExtension . startsWith ( "." ) ) ? this . expectedExtension : "." + this . expectedExtension ; return new PathFilter ( ) { public boolean accept ( Path path ) { return path . getName ( ) . endsWith ( extension ) && ! ( schemaInSourceDir &&... | This method filters out, by extension, files that don't need to be processed. |
36,263 | private static Deserializer getDeserializer ( HiveRegistrationUnit unit ) { Optional < String > serdeClass = unit . getSerDeType ( ) ; if ( ! serdeClass . isPresent ( ) ) { return null ; } String serde = serdeClass . get ( ) ; HiveConf hiveConf ; Deserializer deserializer ; try { hiveConf = SharedResourcesBrokerFactory... | Returns a Deserializer from HiveRegistrationUnit if present and successfully initialized . Else returns null . |
36,264 | public synchronized List < JobSpec > getJobs ( ) { return Lists . transform ( Lists . newArrayList ( loader . loadPullFilesRecursively ( loader . getRootDirectory ( ) , this . sysConfig , shouldLoadGlobalConf ( ) ) ) , this . converter ) ; } | Fetch all the job files under the jobConfDirPath |
36,265 | public synchronized JobSpec getJobSpec ( URI uri ) throws JobSpecNotFoundException { try { Path targetJobSpecFullPath = getPathForURI ( this . jobConfDirPath , uri ) ; return this . converter . apply ( loader . loadPullFile ( targetJobSpecFullPath , this . sysConfig , shouldLoadGlobalConf ( ) ) ) ; } catch ( FileNotFou... | Fetch a single job file based on its URI; return null if the requested URI does not exist. |
36,266 | private void readPrevAvgRecordMillis ( SourceState state ) { Map < String , List < Double > > prevAvgMillis = Maps . newHashMap ( ) ; for ( WorkUnitState workUnitState : state . getPreviousWorkUnitStates ( ) ) { List < KafkaPartition > partitions = KafkaUtils . getPartitions ( workUnitState ) ; for ( KafkaPartition par... | Get the average time to pull a record in the previous run for all topics, each of which is the geometric mean of the average times to pull a record across all partitions of the topic. |
36,267 | private Optional < DataFileVersionStrategy > initDataFileVersionStrategy ( EndPoint endPoint , ReplicationConfiguration rc , Properties props ) { if ( ! ( endPoint instanceof HadoopFsEndPoint ) ) { log . warn ( "Data file version currently only handle the Hadoop Fs EndPoint replication" ) ; return Optional . absent ( )... | Get the version strategy that can retrieve the data file version from the end point . |
36,268 | public void closeCurrentFile ( ) { try { this . closer . close ( ) ; } catch ( IOException e ) { if ( this . currentFile != null ) { LOG . error ( "Failed to close file: " + this . currentFile , e ) ; } } } | Closes the current file being read . |
36,269 | private boolean ensureHiveTableExistenceBeforeAlternation ( String tableName , String dbName , IMetaStoreClient client , Table table , HiveSpec spec ) throws TException { try ( AutoCloseableLock lock = this . locks . getTableLock ( dbName , tableName ) ) { try { try ( Timer . Context context = this . metricContext . ti... | If the table already exists on the Hive side, this will return false; otherwise it will create the table through RPC and return the retVal from the remote MetaStore. |
36,270 | private boolean ensureHiveDbExistence ( String hiveDbName , IMetaStoreClient client ) throws IOException { try ( AutoCloseableLock lock = this . locks . getDbLock ( hiveDbName ) ) { Database db = new Database ( ) ; db . setName ( hiveDbName ) ; try { try ( Timer . Context context = this . metricContext . timer ( GET_HI... | If the database already exists on the Hive side, this will return false; otherwise it will create the database through RPC and return the retVal from the remote MetaStore. |
36,271 | public boolean isCompleted ( ) { WorkingState state = getWorkingState ( ) ; return state == WorkingState . SUCCESSFUL || state == WorkingState . COMMITTED || state == WorkingState . FAILED ; } | Return whether the task has completed running or not . |
36,272 | public synchronized void updateByteMetrics ( long bytesWritten , int branchIndex ) { TaskMetrics metrics = TaskMetrics . get ( this ) ; String forkBranchId = TaskMetrics . taskInstanceRemoved ( this . taskId ) ; Counter taskByteCounter = metrics . getCounter ( MetricGroup . TASK . name ( ) , forkBranchId , BYTES ) ; lo... | Collect byte - level metrics . |
36,273 | public void adjustJobMetricsOnRetry ( int branches ) { TaskMetrics metrics = TaskMetrics . get ( this ) ; for ( int i = 0 ; i < branches ; i ++ ) { String forkBranchId = ForkOperatorUtils . getForkId ( this . taskId , i ) ; long recordsWritten = metrics . getCounter ( MetricGroup . TASK . name ( ) , forkBranchId , RECO... | Adjust job - level metrics when the task gets retried . |
36,274 | public static String getFullEventName ( State state ) { return Joiner . on ( '.' ) . join ( LineageEventBuilder . LIENAGE_EVENT_NAMESPACE , state . getProp ( getKey ( NAME_KEY ) ) ) ; } | Get the full lineage event name from a state |
36,275 | static double [ ] addVector ( double [ ] x , double [ ] y , double c , double [ ] reuse ) { if ( reuse == null ) { reuse = new double [ x . length ] ; } for ( int i = 0 ; i < x . length ; i ++ ) { reuse [ i ] = x [ i ] + c * y [ i ] ; } return reuse ; } | Performs x + cy |
36,276 | public static long getProcessedCount ( List < TaskState > taskStates ) { long value = 0 ; for ( TaskState taskState : taskStates ) { value += taskState . getPropAsLong ( ConfigurationKeys . WRITER_RECORDS_WRITTEN , 0 ) ; } return value ; } | Get the number of records written by all the writers |
36,277 | public static String getTaskFailureExceptions ( List < TaskState > taskStates ) { StringBuffer sb = new StringBuffer ( ) ; appendTaskStateValues ( taskStates , sb , TASK_FAILURE_MESSAGE_KEY ) ; appendTaskStateValues ( taskStates , sb , ConfigurationKeys . TASK_FAILURE_EXCEPTION_KEY ) ; return sb . toString ( ) ; } | Get failure messages |
36,278 | public void commit ( ) throws IOException { if ( ! this . actualProcessedCopyableFile . isPresent ( ) ) { return ; } CopyableFile copyableFile = this . actualProcessedCopyableFile . get ( ) ; Path stagingFilePath = getStagingFilePath ( copyableFile ) ; Path outputFilePath = getSplitOutputFilePath ( copyableFile , this ... | Moves the file from task staging to task output . Each task has its own staging directory but all the tasks share the same task output directory . |
36,279 | public long getGap ( MultiLongWatermark highWatermark ) { Preconditions . checkNotNull ( highWatermark ) ; Preconditions . checkArgument ( this . values . size ( ) == highWatermark . values . size ( ) ) ; long diff = 0 ; for ( int i = 0 ; i < this . values . size ( ) ; i ++ ) { Preconditions . checkArgument ( this . va... | Get the number of records that need to be pulled given the high watermark . |
36,280 | public static KafkaWrapper create ( State state ) { Preconditions . checkNotNull ( state . getProp ( ConfigurationKeys . KAFKA_BROKERS ) , "Need to specify at least one Kafka broker." ) ; KafkaWrapper . Builder builder = new KafkaWrapper . Builder ( ) ; if ( state . getPropAsBoolean ( USE_NEW_KAFKA_API , DEFAULT_USE_NE... | Create a KafkaWrapper based on the given type of Kafka API and list of Kafka brokers . |
36,281 | protected void addWriterOutputToExistingDir ( Path writerOutput , Path publisherOutput , WorkUnitState workUnitState , int branchId , ParallelRunner parallelRunner ) throws IOException { for ( FileStatus status : FileListUtils . listFilesRecursively ( this . writerFileSystemByBranches . get ( branchId ) , writerOutput ... | This method needs to be overridden for TimePartitionedDataPublisher: since the output folder structure contains a timestamp, we have to move the files recursively. |
36,282 | public static void sendEmail ( State state , String subject , String message ) throws EmailException { Email email = new SimpleEmail ( ) ; email . setHostName ( state . getProp ( ConfigurationKeys . EMAIL_HOST_KEY , ConfigurationKeys . DEFAULT_EMAIL_HOST ) ) ; if ( state . contains ( ConfigurationKeys . EMAIL_SMTP_PORT... | A general method for sending emails . |
36,283 | public static void sendJobCompletionEmail ( String jobId , String message , String state , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin notification: job %s has completed with state %s" , jobId , state ) , message ) ; } | Send a job completion notification email . |
36,284 | public static void sendJobCancellationEmail ( String jobId , String message , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin notification: job %s has been cancelled" , jobId ) , message ) ; } | Send a job cancellation notification email . |
36,285 | public static void sendJobFailureAlertEmail ( String jobName , String message , int failures , State jobState ) throws EmailException { sendEmail ( jobState , String . format ( "Gobblin alert: job %s has failed %d %s consecutively in the past" , jobName , failures , failures > 1 ? "times" : "time" ) , message ) ; } | Send a job failure alert email . |
36,286 | public boolean isRecompactionNeeded ( DatasetHelper helper ) { if ( recompactionConditions . isEmpty ( ) ) return false ; if ( operation == CombineOperation . OR ) { for ( RecompactionCondition c : recompactionConditions ) { if ( c . isRecompactionNeeded ( helper ) ) { return true ; } } return false ; } else { for ( Re... | For an OR combination, return true iff at least one condition returns true; for an AND combination, return true iff all conditions return true; in other cases return false. See the combination sketch after the table. |
36,287 | public void open ( Path errFilePath ) throws IOException { this . fs . mkdirs ( errFilePath . getParent ( ) ) ; OutputStream os = this . closer . register ( this . fs . exists ( errFilePath ) ? this . fs . append ( errFilePath ) : this . fs . create ( errFilePath ) ) ; this . writer = this . closer . register ( new Buf... | Open a BufferedWriter |
36,288 | public Partition getGlobalPartition ( long previousWatermark ) { ExtractType extractType = ExtractType . valueOf ( state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_EXTRACT_TYPE ) . toUpperCase ( ) ) ; WatermarkType watermarkType = WatermarkType . valueOf ( state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_... | Get the global partition of the whole data set which has the global low and high watermarks |
36,289 | public HashMap < Long , Long > getPartitions ( long previousWatermark ) { HashMap < Long , Long > defaultPartition = Maps . newHashMap ( ) ; if ( ! isWatermarkExists ( ) ) { defaultPartition . put ( ConfigurationKeys . DEFAULT_WATERMARK_VALUE , ConfigurationKeys . DEFAULT_WATERMARK_VALUE ) ; LOG . info ( "Watermark col... | Get partitions with low and high water marks |
36,290 | public List < Partition > getPartitionList ( long previousWatermark ) { if ( state . getPropAsBoolean ( HAS_USER_SPECIFIED_PARTITIONS ) ) { return createUserSpecifiedPartitions ( ) ; } List < Partition > partitions = new ArrayList < > ( ) ; HashMap < Long , Long > partitionMap = getPartitions ( previousWatermark ) ; if... | Get an unordered list of partitions with lowWatermark, highWatermark, and hasUserSpecifiedHighWatermark. |
36,291 | private List < Partition > createUserSpecifiedPartitions ( ) { List < Partition > partitions = new ArrayList < > ( ) ; List < String > watermarkPoints = state . getPropAsList ( USER_SPECIFIED_PARTITIONS ) ; boolean isEarlyStopped = state . getPropAsBoolean ( IS_EARLY_STOPPED ) ; if ( watermarkPoints == null || watermar... | Generate the partitions based on the lists specified by the user in job config |
36,292 | private static long adjustWatermark ( String baseWatermark , WatermarkType watermarkType ) { long result = ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; switch ( watermarkType ) { case SIMPLE : result = SimpleWatermark . adjustWatermark ( baseWatermark , 0 ) ; break ; case DATE : result = DateWatermark . adjustWatermar... | Adjust a watermark based on watermark type |
36,293 | private static int getUpdatedInterval ( int inputInterval , ExtractType extractType , WatermarkType watermarkType ) { LOG . debug ( "Getting updated interval" ) ; if ( ( extractType == ExtractType . SNAPSHOT && watermarkType == WatermarkType . DATE ) ) { return inputInterval * 24 ; } else if ( extractType == ExtractTyp... | Calculate the interval in hours from the given interval, based on the extract and watermark types. |
36,294 | private long getSnapshotLowWatermark ( WatermarkType watermarkType , long previousWatermark , int deltaForNextWatermark ) { LOG . debug ( "Getting snapshot low water mark" ) ; String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE , ConfigurationKeys . DEFAULT_SOURCE_TIMEZONE ) ; if ( isPrevious... | Get low water mark |
36,295 | protected long getHighWatermark ( ExtractType extractType , WatermarkType watermarkType ) { LOG . debug ( "Getting high watermark" ) ; String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE ) ; long highWatermark = ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; if ( this . isWatermarkOverride ( )... | Get high water mark |
36,296 | private long getSnapshotHighWatermark ( WatermarkType watermarkType ) { LOG . debug ( "Getting snapshot high water mark" ) ; if ( isSimpleWatermark ( watermarkType ) ) { return ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; } String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE ) ; return Long ... | Get snapshot high water mark |
36,297 | private long getAppendHighWatermark ( ExtractType extractType ) { LOG . debug ( "Getting append high water mark" ) ; if ( this . isFullDump ( ) ) { LOG . info ( "Overriding high water mark with end value:" + ConfigurationKeys . SOURCE_QUERYBASED_END_VALUE ) ; long highWatermark = this . state . getPropAsLong ( Configur... | Get append high water mark |
36,298 | private long getAppendWatermarkCutoff ( ExtractType extractType ) { LOG . debug ( "Getting append water mark cutoff" ) ; long highWatermark = ConfigurationKeys . DEFAULT_WATERMARK_VALUE ; String timeZone = this . state . getProp ( ConfigurationKeys . SOURCE_TIMEZONE ) ; AppendMaxLimitType limitType = getAppendLimitType... | Get cutoff for high water mark |
36,299 | private static AppendMaxLimitType getAppendLimitType ( ExtractType extractType , String maxLimit ) { LOG . debug ( "Getting append limit type" ) ; AppendMaxLimitType limitType ; switch ( extractType ) { case APPEND_DAILY : limitType = AppendMaxLimitType . CURRENTDATE ; break ; case APPEND_HOURLY : limitType = AppendMax... | Get append max limit type from the input |
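A few of the rows above reward a worked example; the sketches that follow are illustrative reconstructions in the spirit of the dataset's Gobblin/Hadoop code, not its verbatim source. Rows 36,212–36,215 reference Hadoop's zero-compressed VLong encoding and Text-style string serialization; here is a minimal self-contained round trip following org.apache.hadoop.io.WritableUtils (the class name `VLongCodec` and the `main` driver are assumptions):

```java
import java.io.*;
import java.nio.charset.StandardCharsets;

public final class VLongCodec {
  // Values in [-112, 127] fit in a single byte; otherwise a length-marker byte
  // is written first (-113..-120 for positive, -121..-128 for negative values),
  // followed by the magnitude bytes, most significant first.
  static void writeVLong(DataOutput out, long i) throws IOException {
    if (i >= -112 && i <= 127) { out.writeByte((byte) i); return; }
    int len = -112;
    if (i < 0) { i ^= -1L; len = -120; } // one's complement for negatives
    long tmp = i;
    while (tmp != 0) { tmp = tmp >> 8; len--; }
    out.writeByte((byte) len);
    len = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = len; idx != 0; idx--) {
      int shift = (idx - 1) * 8;
      out.writeByte((byte) ((i & (0xFFL << shift)) >> shift));
    }
  }

  // Row 36,215's helper: first byte alone tells us the total encoded length.
  static int decodeVIntSize(byte value) {
    if (value >= -112) return 1;
    return (value < -120) ? (-119 - value) : (-111 - value);
  }

  static long readVLong(DataInput in) throws IOException {
    byte firstByte = in.readByte();
    int len = decodeVIntSize(firstByte);
    if (len == 1) return firstByte;
    long i = 0;
    for (int idx = 0; idx < len - 1; idx++) {
      i = (i << 8) | (in.readByte() & 0xFF);
    }
    // In the multi-byte case the marker encodes the sign directly.
    return (firstByte < -120) ? (i ^ -1L) : i;
  }

  // Rows 36,212-36,213: a Text-style string is a VLong byte length plus UTF-8 bytes.
  static void writeStringAsText(DataOutput out, String s) throws IOException {
    byte[] utf8 = s.getBytes(StandardCharsets.UTF_8);
    writeVLong(out, utf8.length);
    out.write(utf8);
  }

  static String readTextAsString(DataInput in) throws IOException {
    byte[] buf = new byte[(int) readVLong(in)];
    in.readFully(buf);
    return new String(buf, StandardCharsets.UTF_8);
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    writeStringAsText(new DataOutputStream(bos), "hello");
    DataInput in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
    System.out.println(readTextAsString(in)); // hello
  }
}
```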
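Rows 36,206 and 36,219 both reduce to java.util.concurrent.TimeUnit arithmetic. A small sketch; the tail of printTiming is truncated in the row, so the millisecond remainder and format string below are plausible completions, not the dataset's exact code:

```java
import java.util.concurrent.TimeUnit;

public class TimingDemo {
  // Row 36,206: Hive stores createTime in seconds; convert to milliseconds.
  static long createTimeMillis(long createTimeSecs) {
    return TimeUnit.MILLISECONDS.convert(createTimeSecs, TimeUnit.SECONDS);
  }

  // Row 36,219: decompose an elapsed duration into minutes, seconds, millis.
  static String printTiming(long start, long end) {
    long totalMillis = end - start;
    long mins = TimeUnit.MILLISECONDS.toMinutes(totalMillis);
    long secs = TimeUnit.MILLISECONDS.toSeconds(totalMillis) - TimeUnit.MINUTES.toSeconds(mins);
    long millis = totalMillis - TimeUnit.MINUTES.toMillis(mins) - TimeUnit.SECONDS.toMillis(secs);
    return String.format("%d min %d sec %d millis", mins, secs, millis);
  }

  public static void main(String[] args) {
    System.out.println(printTiming(0, 125_250)); // 2 min 5 sec 250 millis
  }
}
```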
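Rows 36,218 and 36,220 are small query-string helpers. The COALESCE wrapper is complete in the row; the bounds checks and split/trim tail of getColumnListFromQuery are cut off, so that part of this sketch is a guess (the visible snippet checks `startIndex < 0`; the stricter `startIndex < 7` below compensates for the `indexOf(...) + 7` offset):

```java
import java.util.ArrayList;
import java.util.List;

public class QueryStringUtils {
  // Row 36,218: wrap comma-separated columns in COALESCE(...); pass single columns through.
  static String getCoalesceColumnNames(String columnOrColumnList) {
    if (columnOrColumnList == null || columnOrColumnList.isEmpty()) return null;
    return columnOrColumnList.contains(",")
        ? "COALESCE(" + columnOrColumnList + ")" : columnOrColumnList;
  }

  // Row 36,220: pull the projection list out of "SELECT ... FROM ...".
  static List<String> getColumnListFromQuery(String query) {
    if (query == null || query.isEmpty()) return null;
    String lower = query.toLowerCase();
    int startIndex = lower.indexOf("select ") + 7;
    int endIndex = lower.indexOf(" from ");
    if (startIndex < 7 || endIndex < 0) return null;
    List<String> columns = new ArrayList<>();
    for (String col : query.substring(startIndex, endIndex).split(",")) {
      columns.add(col.trim());
    }
    return columns;
  }

  public static void main(String[] args) {
    System.out.println(getCoalesceColumnNames("updated_at,created_at")); // COALESCE(updated_at,created_at)
    System.out.println(getColumnListFromQuery("SELECT id, name FROM users")); // [id, name]
  }
}
```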
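Rows 36,223–36,227 all revolve around Joda-Time conversions pinned to an explicit time zone. A compact sketch of the same pattern (the zone id and format string are example inputs):

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

public class JodaDemo {
  // Row 36,227: resolve a zone id, failing fast with a clearer message.
  static DateTimeZone getTimeZone(String id) {
    try {
      return DateTimeZone.forID(id);
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException("TimeZone " + id + " not recognized");
    }
  }

  public static void main(String[] args) {
    DateTimeZone zone = getTimeZone("America/Los_Angeles");
    DateTimeFormatter fmt = DateTimeFormat.forPattern("yyyyMMddHHmmss").withZone(zone);

    DateTime now = new DateTime(zone);            // row 36,224: current time in a zone
    String printed = fmt.print(now);              // row 36,223: DateTime -> String
    DateTime parsed = fmt.parseDateTime(printed); // row 36,225: String -> DateTime
    System.out.println(printed + " == " + fmt.print(parsed));
  }
}
```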
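Row 36,242's time-window check distills to two Joda Period subtractions from "now". In this sketch the period-string suffixes (d/h/m), parameter shape, and the period formatter itself are assumptions; the row's own getPeriodFormatter and config lookups are not visible:

```java
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.PeriodFormatter;
import org.joda.time.format.PeriodFormatterBuilder;

public class FolderWindowDemo {
  // Parses strings like "3d2h" into a Joda Period.
  static PeriodFormatter getPeriodFormatter() {
    return new PeriodFormatterBuilder()
        .appendDays().appendSuffix("d")
        .appendHours().appendSuffix("h")
        .appendMinutes().appendSuffix("m")
        .toFormatter();
  }

  // True iff folderTime falls between (now - maxTimeAgo) and (now - minTimeAgo).
  static boolean folderWithinAllowedPeriod(DateTime folderTime, String minTimeAgo,
                                           String maxTimeAgo, DateTimeZone zone) {
    DateTime currentTime = new DateTime(zone);
    PeriodFormatter fmt = getPeriodFormatter();
    DateTime earliestAllowed = currentTime.minus(fmt.parsePeriod(maxTimeAgo));
    DateTime latestAllowed = currentTime.minus(fmt.parsePeriod(minTimeAgo));
    return folderTime.isAfter(earliestAllowed) && folderTime.isBefore(latestAllowed);
  }

  public static void main(String[] args) {
    DateTime folder = new DateTime(DateTimeZone.UTC).minusDays(2);
    System.out.println(folderWithinAllowedPeriod(folder, "1d", "3d", DateTimeZone.UTC)); // true
  }
}
```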
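Row 36,286's OR/AND combination of recompaction conditions is a generic predicate-combining pattern. A distilled sketch using java.util.function.Predicate as a stand-in for RecompactionCondition (the generic `isNeeded` helper is illustrative):

```java
import java.util.List;
import java.util.function.Predicate;

public class CombineDemo {
  enum CombineOperation { OR, AND }

  // OR -> any condition true; AND -> all conditions true;
  // an empty condition list is always false, matching the row's guard.
  static <T> boolean isNeeded(List<Predicate<T>> conditions, CombineOperation op, T input) {
    if (conditions.isEmpty()) return false;
    return op == CombineOperation.OR
        ? conditions.stream().anyMatch(c -> c.test(input))
        : conditions.stream().allMatch(c -> c.test(input));
  }

  public static void main(String[] args) {
    List<Predicate<Integer>> conds = List.of(n -> n > 10, n -> n % 2 == 0);
    System.out.println(isNeeded(conds, CombineOperation.OR, 4));  // true (even)
    System.out.println(isNeeded(conds, CombineOperation.AND, 4)); // false (not > 10)
  }
}
```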
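Row 36,279 computes the pull gap across a multi-partition watermark: the records still to pull are the summed per-index differences between the high and low watermarks. An array-based stand-in for MultiLongWatermark (the array representation is a simplification):

```java
public class WatermarkGapDemo {
  // Precondition checks mirror the row: matching sizes, and no low value
  // may be ahead of its corresponding high value.
  static long getGap(long[] low, long[] high) {
    if (low.length != high.length) {
      throw new IllegalArgumentException("Watermark sizes differ");
    }
    long diff = 0;
    for (int i = 0; i < low.length; i++) {
      if (low[i] > high[i]) {
        throw new IllegalArgumentException("Low watermark ahead of high at index " + i);
      }
      diff += high[i] - low[i];
    }
    return diff;
  }

  public static void main(String[] args) {
    System.out.println(getGap(new long[]{100, 200}, new long[]{150, 260})); // 110
  }
}
```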
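Row 36,232's rename helper tolerates a lost race: a failed rename is only an error if the destination still does not exist afterwards. This sketch (requiring hadoop-common) substitutes a plain FileSystem.rename for the row's renamePathHandleLocalFSRace, and the boolean return semantics are assumed from the method name:

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameDemo {
  // Create the parent, attempt the rename, and treat a reported failure as an
  // error only when the destination is still missing (another writer may have
  // won the race). Not atomic: exists() + rename() can still interleave.
  static boolean unsafeRenameIfNotExists(FileSystem fs, Path from, Path to) throws IOException {
    if (fs.exists(to)) {
      return false; // destination already present; nothing to do
    }
    if (!fs.exists(to.getParent())) {
      fs.mkdirs(to.getParent());
    }
    if (!fs.rename(from, to) && !fs.exists(to)) {
      throw new IOException("Failed to rename " + from + " to " + to);
    }
    return true;
  }
}
```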