idx int64 0 41.2k | question stringlengths 74 4.21k | target stringlengths 5 888 |
|---|---|---|
36,300 | private static int getAppendLimitDelta ( String maxLimit ) { LOG . debug ( "Getting append limit delta" ) ; int limitDelta = 0 ; if ( ! Strings . isNullOrEmpty ( maxLimit ) ) { String [ ] limitParams = maxLimit . split ( "-" ) ; if ( limitParams . length >= 2 ) { limitDelta = Integer . parseInt ( limitParams [ 1 ] ) ; ... | Get append max limit delta num |
36,301 | private boolean isWatermarkExists ( ) { if ( ! Strings . isNullOrEmpty ( this . state . getProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY ) ) && ! Strings . isNullOrEmpty ( this . state . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_WATERMARK_TYPE ) ) ) { return true ; } return false ; } | true if water mark columns and water mark type provided |
36,302 | public static Map < String , Object > getConfigForBranch ( EntityType entityType , WorkUnitState workUnitState ) { return getConfigForBranch ( entityType , null , workUnitState ) ; } | Retrieve encryption configuration for the branch the WorkUnitState represents |
36,303 | public static Map < String , Object > getConfigForBranch ( EntityType entityType , State taskState , int numBranches , int branch ) { return getConfigForBranch ( taskState , entityType . getConfigPrefix ( ) , ForkOperatorUtils . getPropertyNameForBranch ( "" , numBranches , branch ) ) ; } | Retrieve encryption config for a given branch of a task |
36,304 | public static String getKeystoreType ( Map < String , Object > parameters ) { String type = ( String ) parameters . get ( ENCRYPTION_KEYSTORE_TYPE_KEY ) ; if ( type == null ) { type = ENCRYPTION_KEYSTORE_TYPE_KEY_DEFAULT ; } return type ; } | Get the type of keystore to instantiate |
36,305 | public static String getCipher ( Map < String , Object > parameters ) { return ( String ) parameters . get ( ENCRYPTION_CIPHER_KEY ) ; } | Get the underlying cipher name |
36,306 | private static Map < String , Object > extractPropertiesForBranch ( Properties properties , String prefix , String branchSuffix ) { Map < String , Object > ret = new HashMap < > ( ) ; for ( Map . Entry < Object , Object > prop : properties . entrySet ( ) ) { String key = ( String ) prop . getKey ( ) ; if ( key . starts... | Extract a set of properties for a given branch stripping out the prefix and branch suffix . |
36,307 | private boolean isRestorable ( HivePartitionDataset dataset , HivePartitionVersion version ) throws IOException { if ( version . getLocation ( ) . toString ( ) . equalsIgnoreCase ( dataset . getLocation ( ) . toString ( ) ) ) { return false ; } FileSystem fs = ProxyUtils . getOwnerFs ( new State ( this . state ) , vers... | A version is called restorable if it can be used to restore dataset . |
36,308 | public static void createGobblinHelixCluster ( String zkConnectionString , String clusterName , boolean overwrite ) { ClusterSetup clusterSetup = new ClusterSetup ( zkConnectionString ) ; clusterSetup . addCluster ( clusterName , overwrite ) ; String autoJoinConfig = ZKHelixManager . ALLOW_PARTICIPANT_AUTO_JOIN + "=tru... | Create a Helix cluster for the Gobblin Cluster application . |
36,309 | public static void submitJobToQueue ( JobConfig . Builder jobConfigBuilder , String queueName , String jobName , TaskDriver helixTaskDriver , HelixManager helixManager , long jobQueueDeleteTimeoutSeconds ) throws Exception { submitJobToWorkFlow ( jobConfigBuilder , queueName , jobName , helixTaskDriver , helixManager ,... | We have switched from Helix JobQueue to WorkFlow based job execution . |
36,310 | private static void deleteStoppedHelixJob ( HelixManager helixManager , String workFlowName , String jobName ) throws InterruptedException { WorkflowContext workflowContext = TaskDriver . getWorkflowContext ( helixManager , workFlowName ) ; while ( workflowContext . getJobState ( TaskUtil . getNamespacedJobName ( workF... | Deletes the stopped Helix Workflow . Caller should stop the Workflow before calling this method . |
36,311 | public boolean copyPartitionParams ( String completeSourcePartitionName , String completeDestPartitionName , List < String > whitelist , List < String > blacklist ) { Optional < Partition > sourcePartitionOptional = getPartitionObject ( completeSourcePartitionName ) ; Optional < Partition > destPartitionOptional = getP... | Method to copy partition parameters from source partition to destination partition |
36,312 | public static Set < String > findFullPrefixKeys ( Properties properties , Optional < String > keyPrefix ) { TreeSet < String > propNames = new TreeSet < > ( ) ; for ( Map . Entry < Object , Object > entry : properties . entrySet ( ) ) { String entryKey = entry . getKey ( ) . toString ( ) ; if ( StringUtils . startsWith... | Finds a list of properties whose keys are complete prefix of other keys . This function is meant to be used during conversion from Properties to typesafe Config as the latter does not support this scenario . |
36,313 | private static Map < String , Object > guessPropertiesTypes ( Map < Object , Object > srcProperties ) { Map < String , Object > res = new HashMap < > ( ) ; for ( Map . Entry < Object , Object > prop : srcProperties . entrySet ( ) ) { Object value = prop . getValue ( ) ; if ( null != value && value instanceof String && ... | Attempts to guess the types of a Properties . By default typesafe will make all property values Strings . This implementation will try to recognize booleans and numbers . All keys are treated as strings . |
36,314 | public static boolean verifySubset ( Config superConfig , Config subConfig ) { for ( Map . Entry < String , ConfigValue > entry : subConfig . entrySet ( ) ) { if ( ! superConfig . hasPath ( entry . getKey ( ) ) || ! superConfig . getValue ( entry . getKey ( ) ) . unwrapped ( ) . equals ( entry . getValue ( ) . unwrappe... | Check that every key - value in subConfig is in superConfig |
36,315 | public static boolean checkReaderWriterCompatibility ( Schema readerSchema , Schema writerSchema , boolean ignoreNamespace ) { if ( ignoreNamespace ) { List < Schema . Field > fields = deepCopySchemaFields ( readerSchema ) ; readerSchema = Schema . createRecord ( writerSchema . getName ( ) , writerSchema . getDoc ( ) ,... | Validates that the provided reader schema can be used to decode avro data written with the provided writer schema . |
36,316 | public static Optional < Field > getField ( Schema schema , String fieldLocation ) { Preconditions . checkNotNull ( schema ) ; Preconditions . checkArgument ( ! Strings . isNullOrEmpty ( fieldLocation ) ) ; Splitter splitter = Splitter . on ( FIELD_LOCATION_DELIMITER ) . omitEmptyStrings ( ) . trimResults ( ) ; List < ... | Given a GenericRecord this method will return the field specified by the path parameter . The fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve . For example field1 . nestedField1 takes field field1 and retrieves nestedField1 from it . |
36,317 | public static Optional < Object > getFieldValue ( GenericRecord record , String fieldLocation ) { Map < String , Object > ret = getMultiFieldValue ( record , fieldLocation ) ; return Optional . fromNullable ( ret . get ( fieldLocation ) ) ; } | Given a GenericRecord this method will return the field specified by the path parameter . The fieldLocation parameter is an ordered string specifying the location of the nested field to retrieve . For example field1 . nestedField1 takes the value of the field field1 and retrieves the field nestedField1 from it . |
36,318 | private static Object getObjectFromMap ( Map map , String key ) { Utf8 utf8Key = new Utf8 ( key ) ; return map . get ( utf8Key ) ; } | This method is to get object from map given a key as string . Avro persists string as Utf8 |
36,319 | public static GenericRecord convertRecordSchema ( GenericRecord record , Schema newSchema ) throws IOException { if ( record . getSchema ( ) . equals ( newSchema ) ) { return record ; } try { BinaryDecoder decoder = new DecoderFactory ( ) . binaryDecoder ( recordToByteArray ( record ) , null ) ; DatumReader < GenericRe... | Change the schema of an Avro record . |
36,320 | public static byte [ ] recordToByteArray ( GenericRecord record ) throws IOException { try ( ByteArrayOutputStream out = new ByteArrayOutputStream ( ) ) { Encoder encoder = EncoderFactory . get ( ) . directBinaryEncoder ( out , null ) ; DatumWriter < GenericRecord > writer = new GenericDatumWriter < > ( record . getSch... | Convert a GenericRecord to a byte array . |
36,321 | public static Schema getSchemaFromDataFile ( Path dataFile , FileSystem fs ) throws IOException { try ( SeekableInput sin = new FsInput ( dataFile , fs . getConf ( ) ) ; DataFileReader < GenericRecord > reader = new DataFileReader < > ( sin , new GenericDatumReader < GenericRecord > ( ) ) ) { return reader . getSchema ... | Get Avro schema from an Avro data file . |
36,322 | public static Schema parseSchemaFromFile ( Path filePath , FileSystem fs ) throws IOException { Preconditions . checkArgument ( fs . exists ( filePath ) , filePath + " does not exist" ) ; try ( FSDataInputStream in = fs . open ( filePath ) ) { return new Schema . Parser ( ) . parse ( in ) ; } } | Parse Avro schema from a schema file . |
36,323 | public static void writeSchemaToFile ( Schema schema , Path filePath , Path tempFilePath , FileSystem fs , boolean overwrite , FsPermission perm ) throws IOException { boolean fileExists = fs . exists ( filePath ) ; if ( ! overwrite ) { Preconditions . checkState ( ! fileExists , filePath + " already exists" ) ; } else... | Write a schema to a file |
36,324 | public static Schema nullifyFieldsForSchemaMerge ( Schema oldSchema , Schema newSchema ) { if ( oldSchema == null ) { LOG . warn ( "No previous schema available, use the new schema instead." ) ; return newSchema ; } if ( ! ( oldSchema . getType ( ) . equals ( Type . RECORD ) && newSchema . getType ( ) . equals ( Type .... | Merge oldSchema and newSchema . Set a field default value to null if this field exists in the old schema but not in the new schema . |
36,325 | public static Optional < Schema > removeUncomparableFields ( Schema schema ) { return removeUncomparableFields ( schema , Sets . < Schema > newHashSet ( ) ) ; } | Remove map array enum fields as well as union fields that contain map array or enum from an Avro schema . A schema with these fields cannot be used as Mapper key in a MapReduce job . |
36,326 | public Collection < Spec > getAllVersionsOfSpec ( URI specUri ) { Preconditions . checkArgument ( null != specUri , "Spec URI should not be null" ) ; Path specPath = getPathForURI ( this . fsSpecStoreDirPath , specUri , FlowSpec . Builder . DEFAULT_VERSION ) ; return getAllVersionsOfSpec ( specPath ) ; } | Returns all versions of the spec defined by specUri . Currently multiple versions are not supported so this should return exactly one spec . |
36,327 | protected Path getPathForURI ( Path fsSpecStoreDirPath , URI uri , String version ) { return PathUtils . addExtension ( PathUtils . mergePaths ( fsSpecStoreDirPath , new Path ( uri ) ) , version ) ; } | Construct a file path given URI and version of a spec . |
36,328 | public static JsonElementConverter getConverter ( JsonSchema schema , boolean repeated ) { InputType fieldType = schema . getInputType ( ) ; switch ( fieldType ) { case INT : return new IntConverter ( schema , repeated ) ; case LONG : return new LongConverter ( schema , repeated ) ; case FLOAT : return new FloatConvert... | Use to create a converter for a single field from a parquetSchema . |
36,329 | private Config loadConfigFileWithFlowNameOverrides ( Path configFilePath ) throws IOException { Config flowConfig = this . pullFileLoader . loadPullFile ( configFilePath , emptyConfig , false ) ; String flowName = FSSpecStore . getSpecName ( configFilePath ) ; String flowGroup = FSSpecStore . getSpecGroup ( configFileP... | Load the config file and override the flow name and flow path properties with the names from the file path |
36,330 | public static URI buildURI ( String urlTemplate , Map < String , String > keys , Map < String , String > queryParams ) { String url = urlTemplate ; if ( keys != null && keys . size ( ) != 0 ) { url = StrSubstitutor . replace ( urlTemplate , keys ) ; } try { URIBuilder uriBuilder = new URIBuilder ( url ) ; if ( queryPar... | Given a url template interpolate with keys and build the URI after adding query parameters |
36,331 | public static Set < String > getErrorCodeWhitelist ( Config config ) { String list = config . getString ( HttpConstants . ERROR_CODE_WHITELIST ) . toLowerCase ( ) ; return new HashSet < > ( getStringList ( list ) ) ; } | Get the error code whitelist from a config |
36,332 | public static Map < String , Object > toMap ( String jsonString ) { Map < String , Object > map = new HashMap < > ( ) ; return GSON . fromJson ( jsonString , map . getClass ( ) ) ; } | Convert a json encoded string to a Map |
36,333 | public static String createR2ClientLimiterKey ( Config config ) { String urlTemplate = config . getString ( HttpConstants . URL_TEMPLATE ) ; try { String escaped = URIUtil . encodeQuery ( urlTemplate ) ; URI uri = new URI ( escaped ) ; if ( uri . getHost ( ) == null ) throw new RuntimeException ( "Cannot get host part ... | Convert D2 URL template into a string used for throttling limiter |
36,334 | public void onSuccessfulWrite ( long startTimeNanos ) { Instrumented . updateTimer ( this . dataWriterTimer , System . nanoTime ( ) - startTimeNanos , TimeUnit . NANOSECONDS ) ; Instrumented . markMeter ( this . successfulWritesMeter ) ; } | Called after a successful write of a record . |
36,335 | public String convertSchema ( Schema inputSchema , WorkUnitState workUnit ) throws SchemaConversionException { return EnvelopeSchemaConverter . class . getName ( ) ; } | Do nothing actual schema must be obtained from records . |
36,336 | public Iterable < GenericRecord > convertRecord ( String outputSchema , GenericRecord inputRecord , WorkUnitState workUnit ) throws DataConversionException { try { String schemaIdField = workUnit . contains ( PAYLOAD_SCHEMA_ID_FIELD ) ? workUnit . getProp ( PAYLOAD_SCHEMA_ID_FIELD ) : DEFAULT_PAYLOAD_SCHEMA_ID_FIELD ; ... | Get actual schema from registry and deserialize payload using it . |
36,337 | public byte [ ] getPayload ( GenericRecord inputRecord , String payloadFieldName ) { ByteBuffer bb = ( ByteBuffer ) inputRecord . get ( payloadFieldName ) ; byte [ ] payloadBytes ; if ( bb . hasArray ( ) ) { payloadBytes = bb . array ( ) ; } else { payloadBytes = new byte [ bb . remaining ( ) ] ; bb . get ( payloadByte... | Get payload field from GenericRecord and convert to byte array |
36,338 | public GenericRecord deserializePayload ( byte [ ] payload , Schema payloadSchema ) throws IOException , ExecutionException { Decoder decoder = this . decoderFactory . binaryDecoder ( payload , null ) ; GenericDatumReader < GenericRecord > reader = this . readers . get ( payloadSchema ) ; return reader . read ( null , ... | Deserialize payload using payload schema |
36,339 | private void deleteStateStore ( URI jobSpecUri ) throws IOException { int EXPECTED_NUM_URI_TOKENS = 3 ; String [ ] uriTokens = jobSpecUri . getPath ( ) . split ( "/" ) ; if ( null == this . datasetStateStore ) { log . warn ( "Job state store deletion failed as datasetstore is not initialized." ) ; return ; } if ( uriTo... | It fetches the job name from the given jobSpecUri and deletes its corresponding state store |
36,340 | private boolean shouldRemoveDataPullUpperBounds ( ) { if ( ! this . workUnitState . getPropAsBoolean ( ConfigurationKeys . SOURCE_QUERYBASED_ALLOW_REMOVE_UPPER_BOUNDS , true ) ) { return false ; } if ( ! partition . isLastPartition ( ) ) { return false ; } if ( partition . getHasUserSpecifiedHighWatermark ( ) || this .... | Check if it s appropriate to remove data pull upper bounds in the last work unit fetching as much data as possible from the source . As between the time when data query was created and that was executed there might be some new data generated in the source . Removing the upper bounds will help us grab the new data . |
36,341 | private void removeDataPullUpperBounds ( ) { log . info ( "Removing data pull upper bound for last work unit" ) ; Iterator < Predicate > it = predicateList . iterator ( ) ; while ( it . hasNext ( ) ) { Predicate predicate = it . next ( ) ; if ( predicate . getType ( ) == Predicate . PredicateType . HWM ) { log . info (... | Remove all upper bounds in the predicateList used for pulling data |
36,342 | private Iterator < D > getIterator ( ) throws DataRecordException , IOException { if ( Boolean . valueOf ( this . workUnitState . getProp ( ConfigurationKeys . SOURCE_QUERYBASED_IS_SPECIFIC_API_ACTIVE ) ) ) { return this . getRecordSetFromSourceApi ( this . schema , this . entity , this . workUnit , this . predicateLis... | Get iterator from protocol specific api if is . specific . api . active is false Get iterator from source specific api if is . specific . api . active is true |
36,343 | public void close ( ) { log . info ( "Updating the current state high water mark with " + this . highWatermark ) ; this . workUnitState . setActualHighWatermark ( new LongWatermark ( this . highWatermark ) ) ; try { this . closeConnection ( ) ; } catch ( Exception e ) { log . error ( "Failed to close the extractor" , e... | close extractor read stream update high watermark |
36,344 | public Extractor < S , D > build ( ) throws ExtractPrepareException { String watermarkColumn = this . workUnitState . getProp ( ConfigurationKeys . EXTRACT_DELTA_FIELDS_KEY ) ; long lwm = partition . getLowWatermark ( ) ; long hwm = partition . getHighWatermark ( ) ; log . info ( "Low water mark: " + lwm + "; and High ... | build schema record count and high water mark |
36,345 | private void setRangePredicates ( String watermarkColumn , WatermarkType watermarkType , long lwmValue , long hwmValue ) { log . debug ( "Getting range predicates" ) ; String lwmOperator = partition . isLowWatermarkInclusive ( ) ? ">=" : ">" ; String hwmOperator = ( partition . isLastPartition ( ) || partition . isHigh... | range predicates for watermark column and transaction columns . |
36,346 | public void modifyDatasetForRecompact ( State recompactState ) { if ( ! this . jobProps ( ) . getPropAsBoolean ( MRCompactor . COMPACTION_RECOMPACT_ALL_DATA , MRCompactor . DEFAULT_COMPACTION_RECOMPACT_ALL_DATA ) ) { this . overwriteInputPath ( this . outputLatePath ) ; this . cleanAdditionalInputPath ( ) ; } else { th... | Modify an existing dataset to recompact from its output path . |
36,347 | public void beforeConvert ( SO outputSchema , DI inputRecord , WorkUnitState workUnit ) { Instrumented . markMeter ( this . recordsInMeter ) ; } | Called before conversion . |
36,348 | public void afterConvert ( Iterable < DO > iterable , long startTimeNanos ) { Instrumented . updateTimer ( this . converterTimer , System . nanoTime ( ) - startTimeNanos , TimeUnit . NANOSECONDS ) ; } | Called after conversion . |
36,349 | protected Callback < Response < PermitAllocation > > decorateCallback ( PermitRequest request , Callback < Response < PermitAllocation > > callback ) { return callback ; } | Decorate the callback to intercept some responses . |
36,350 | public static List < Pattern > getPatternsFromStrings ( List < String > strings ) { List < Pattern > patterns = Lists . newArrayList ( ) ; for ( String s : strings ) { patterns . add ( Pattern . compile ( s ) ) ; } return patterns ; } | Convert a list of Strings to a list of Patterns . |
36,351 | public static boolean stringInPatterns ( String s , List < Pattern > patterns ) { for ( Pattern pattern : patterns ) { if ( pattern . matcher ( s ) . matches ( ) ) { return true ; } } return false ; } | Determines whether a string matches one of the regex patterns . |
36,352 | public List < HivePartitionDataset > findDatasets ( ) throws IOException { List < HivePartitionDataset > list = new ArrayList < > ( ) ; for ( HiveDataset hiveDataset : this . hiveDatasets ) { for ( Partition partition : hiveDataset . getPartitionsFromDataset ( ) ) { list . add ( new HivePartitionDataset ( partition ) )... | Will find all datasets according to whitelist except the backup trash and staging tables . |
36,353 | private void build ( ) { this . startNodes = new ArrayList < > ( ) ; this . endNodes = new ArrayList < > ( ) ; this . parentChildMap = new HashMap < > ( ) ; for ( DagNode node : this . nodes ) { if ( node . getParentNodes ( ) == null ) { this . startNodes . add ( node ) ; } else { List < DagNode > parentNodeList = node... | Constructs the dag from the Node list . |
36,354 | public InputStream decryptFile ( InputStream inputStream , String passPhrase ) throws IOException { PGPEncryptedDataList enc = getPGPEncryptedDataList ( inputStream ) ; PGPPBEEncryptedData pbe = ( PGPPBEEncryptedData ) enc . get ( 0 ) ; InputStream clear ; try { clear = pbe . getDataStream ( new JcePBEDataDecryptorFact... | Taking in a file inputstream and a passPhrase generate a decrypted file inputstream . |
36,355 | public InputStream decryptFile ( InputStream inputStream , InputStream keyIn , String passPhrase ) throws IOException { try { PGPEncryptedDataList enc = getPGPEncryptedDataList ( inputStream ) ; Iterator it = enc . getEncryptedDataObjects ( ) ; PGPPrivateKey sKey = null ; PGPPublicKeyEncryptedData pbe = null ; PGPSecre... | Taking in a file inputstream keyring inputstream and a passPhrase generate a decrypted file inputstream . |
36,356 | private PGPPrivateKey findSecretKey ( PGPSecretKeyRingCollection pgpSec , long keyID , String passPhrase ) throws PGPException { PGPSecretKey pgpSecKey = pgpSec . getSecretKey ( keyID ) ; if ( pgpSecKey == null ) { return null ; } return pgpSecKey . extractPrivateKey ( new JcePBESecretKeyDecryptorBuilder ( ) . setProvi... | Private util function that finds the private key from keyring collection based on keyId and passPhrase |
36,357 | private PGPEncryptedDataList getPGPEncryptedDataList ( InputStream inputStream ) throws IOException { if ( Security . getProvider ( BouncyCastleProvider . PROVIDER_NAME ) == null ) { Security . addProvider ( new BouncyCastleProvider ( ) ) ; } inputStream = PGPUtil . getDecoderStream ( inputStream ) ; JcaPGPObjectFactor... | Generate a PGPEncryptedDataList from an inputstream |
36,358 | @ SuppressWarnings ( "unchecked" ) public Iterator < String [ ] > downloadFile ( String file ) throws IOException { log . info ( "Beginning to download file: " + file ) ; final State state = fileBasedExtractor . workUnitState ; CSVReader reader ; try { if ( state . contains ( DELIMITER ) ) { String delimiterStr = state... | Provide iterator via OpenCSV s CSVReader . Provides a way to skip top rows by providing regex . ( This is useful when CSV file comes with comments on top rows but not in fixed size . It also provides validation on schema by matching header names between property s schema and header name in CSV file . |
36,359 | public WorkUnitStream transform ( Function < WorkUnit , WorkUnit > function ) { if ( this . materializedWorkUnits == null ) { return new BasicWorkUnitStream ( this , Iterators . transform ( this . workUnits , function ) , null ) ; } else { return new BasicWorkUnitStream ( this , null , Lists . newArrayList ( Lists . tr... | Apply a transformation function to this stream . |
36,360 | public WorkUnitStream filter ( Predicate < WorkUnit > predicate ) { if ( this . materializedWorkUnits == null ) { return new BasicWorkUnitStream ( this , Iterators . filter ( this . workUnits , predicate ) , null ) ; } else { return new BasicWorkUnitStream ( this , null , Lists . newArrayList ( Iterables . filter ( thi... | Apply a filtering function to this stream . |
36,361 | public void printTable ( ) { if ( this . labels != null ) { System . out . printf ( this . rowFormat , this . labels . toArray ( ) ) ; } for ( List < String > row : this . data ) { System . out . printf ( this . rowFormat , row . toArray ( ) ) ; } } | Prints the table of data |
36,362 | private List < Integer > getColumnMaxWidths ( ) { int numCols = data . get ( 0 ) . size ( ) ; int [ ] widths = new int [ numCols ] ; if ( this . labels != null ) { for ( int i = 0 ; i < numCols ; i ++ ) { widths [ i ] = this . labels . get ( i ) . length ( ) ; } } for ( List < String > row : this . data ) { for ( int i... | A function for determining the max widths of columns accounting for labels and data . |
36,363 | private String getRowFormat ( List < Integer > widths ) { StringBuilder rowFormat = new StringBuilder ( spaces ( this . indentation ) ) ; for ( int i = 0 ; i < widths . size ( ) ; i ++ ) { rowFormat . append ( "%" ) ; rowFormat . append ( this . flags != null ? this . flags . get ( i ) : "" ) ; rowFormat . append ( wid... | Generates a simple row format string given a set of widths |
36,364 | private void copyJarDependencies ( State state ) throws IOException { if ( this . tmpJobDir == null ) { throw new RuntimeException ( "Job directory is not created" ) ; } if ( ! state . contains ( ConfigurationKeys . JOB_JAR_FILES_KEY ) ) { return ; } LocalFileSystem lfs = FileSystem . getLocal ( HadoopUtils . getConfFr... | Copy dependent jars to a temporary job directory on HDFS |
36,365 | public void executeQueries ( List < String > queries , Optional < String > proxy ) throws SQLException { Preconditions . checkArgument ( ! this . statementMap . isEmpty ( ) , "No hive connection. Unable to execute queries" ) ; if ( ! proxy . isPresent ( ) ) { Preconditions . checkArgument ( this . statementMap . size (... | Execute queries . |
36,366 | public long getRecordCount ( Path filepath ) { String [ ] components = filepath . getName ( ) . split ( Pattern . quote ( SEPARATOR ) ) ; Preconditions . checkArgument ( components . length >= 2 && StringUtils . isNumeric ( components [ components . length - 2 ] ) , String . format ( "Filename %s does not follow the pa... | The record count should be the last component before the filename extension . |
36,367 | private R2Request < GenericRecord > buildWriteRequest ( BufferedRecord < GenericRecord > record ) { if ( record == null ) { return null ; } R2Request < GenericRecord > request = new R2Request < > ( ) ; HttpOperation httpOperation = HttpUtils . toHttpOperation ( record . getRecord ( ) ) ; URI uri = HttpUtils . buildURI ... | Build a request from a single record |
36,368 | public AzkabanClientStatus createProject ( String projectName , String description ) throws AzkabanClientException { AzkabanMultiCallables . CreateProjectCallable callable = AzkabanMultiCallables . CreateProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . description ( description ) . build ... | Creates a project . |
36,369 | public AzkabanClientStatus deleteProject ( String projectName ) throws AzkabanClientException { AzkabanMultiCallables . DeleteProjectCallable callable = AzkabanMultiCallables . DeleteProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . build ( ) ; return runWithRetry ( callable , AzkabanClien... | Deletes a project . Currently no response message will be returned after finishing the delete operation . Thus success status is always expected . |
36,370 | public AzkabanClientStatus uploadProjectZip ( String projectName , File zipFile ) throws AzkabanClientException { AzkabanMultiCallables . UploadProjectCallable callable = AzkabanMultiCallables . UploadProjectCallable . builder ( ) . client ( this ) . projectName ( projectName ) . zipFile ( zipFile ) . build ( ) ; retur... | Updates a project by uploading a new zip file . Before uploading any project zip files the project should be created first . |
36,371 | public AzkabanExecuteFlowStatus executeFlowWithOptions ( String projectName , String flowName , Map < String , String > flowOptions , Map < String , String > flowParameters ) throws AzkabanClientException { AzkabanMultiCallables . ExecuteFlowCallable callable = AzkabanMultiCallables . ExecuteFlowCallable . builder ( ) ... | Execute a flow by providing flow parameters and options . The project and flow should be created first . |
36,372 | public AzkabanExecuteFlowStatus executeFlow ( String projectName , String flowName , Map < String , String > flowParameters ) throws AzkabanClientException { return executeFlowWithOptions ( projectName , flowName , null , flowParameters ) ; } | Execute a flow with flow parameters . The project and flow should be created first . |
36,373 | public AzkabanClientStatus cancelFlow ( String execId ) throws AzkabanClientException { AzkabanMultiCallables . CancelFlowCallable callable = AzkabanMultiCallables . CancelFlowCallable . builder ( ) . client ( this ) . execId ( execId ) . build ( ) ; return runWithRetry ( callable , AzkabanClientStatus . class ) ; } | Cancel a flow by execution id . |
36,374 | public AzkabanClientStatus fetchExecutionLog ( String execId , String jobId , String offset , String length , File ouf ) throws AzkabanClientException { AzkabanMultiCallables . FetchExecLogCallable callable = AzkabanMultiCallables . FetchExecLogCallable . builder ( ) . client ( this ) . execId ( execId ) . jobId ( jobI... | Fetch an execution log . |
36,375 | public AzkabanFetchExecuteFlowStatus fetchFlowExecution ( String execId ) throws AzkabanClientException { AzkabanMultiCallables . FetchFlowExecCallable callable = AzkabanMultiCallables . FetchFlowExecCallable . builder ( ) . client ( this ) . execId ( execId ) . build ( ) ; return runWithRetry ( callable , AzkabanFetch... | Given an execution id fetches all the detailed information of that execution including a list of all the job executions . |
36,376 | public AsyncDataWriter getAsyncDataWriter ( Properties properties ) { EventhubDataWriter eventhubDataWriter = new EventhubDataWriter ( properties ) ; EventhubBatchAccumulator accumulator = new EventhubBatchAccumulator ( properties ) ; BatchedEventhubDataWriter batchedEventhubDataWriter = new BatchedEventhubDataWriter (... | Create an eventhub data writer wrapped into a buffered async data writer |
36,377 | public PermitsAndDelay getPermitsAndDelay ( long requestedPermits , long minPermits , long timeoutMillis ) { try { long storedTokens = this . tokenBucket . getStoredTokens ( ) ; long eagerTokens = storedTokens / 2 ; if ( eagerTokens > requestedPermits && this . tokenBucket . getTokens ( eagerTokens , 0 , TimeUnit . MIL... | Request tokens . |
36,378 | public StreamCodec buildStreamEncryptor ( Map < String , Object > parameters ) { String encryptionType = EncryptionConfigParser . getEncryptionType ( parameters ) ; if ( encryptionType == null ) { throw new IllegalArgumentException ( "Encryption type not present in parameters!" ) ; } return buildStreamCryptoProvider ( ... | Return a StreamEncryptor for the given parameters . The algorithm type to use will be extracted from the parameters object . |
36,379 | public CredentialStore buildCredentialStore ( Map < String , Object > parameters ) { String ks_type = EncryptionConfigParser . getKeystoreType ( parameters ) ; String ks_path = EncryptionConfigParser . getKeystorePath ( parameters ) ; String ks_password = EncryptionConfigParser . getKeystorePassword ( parameters ) ; tr... | Build a credential store with the given parameters . |
36,380 | public String getCharset ( String contentType ) { String charSet = knownCharsets . get ( contentType ) ; if ( charSet != null ) { return charSet ; } if ( contentType . startsWith ( "text/" ) || contentType . endsWith ( "+json" ) || contentType . endsWith ( "+xml" ) ) { return "UTF-8" ; } return "BINARY" ; } | Check which character set a given content - type corresponds to . |
36,381 | public boolean inferPrintableFromMetadata ( Metadata md ) { String inferredCharset = "BINARY" ; List < String > transferEncoding = md . getGlobalMetadata ( ) . getTransferEncoding ( ) ; if ( transferEncoding != null ) { inferredCharset = getCharset ( transferEncoding . get ( transferEncoding . size ( ) - 1 ) ) ; } else... | Heuristic to infer if content is printable from metadata . |
36,382 | public void registerCharsetMapping ( String contentType , String charSet ) { if ( knownCharsets . contains ( contentType ) ) { log . warn ( "{} is already registered; re-registering" ) ; } knownCharsets . put ( contentType , charSet ) ; } | Register a new contentType to charSet mapping . |
36,383 | private static AuditCountClientFactory getClientFactory ( State state ) { if ( ! state . contains ( AuditCountClientFactory . AUDIT_COUNT_CLIENT_FACTORY ) ) { return new EmptyAuditCountClientFactory ( ) ; } try { String factoryName = state . getProp ( AuditCountClientFactory . AUDIT_COUNT_CLIENT_FACTORY ) ; ClassAliasR... | Obtain a client factory |
36,384 | private static Credential buildCredentialFromP12 ( String privateKeyPath , Optional < String > fsUri , Optional < String > id , HttpTransport transport , Collection < String > serviceAccountScopes ) throws IOException , GeneralSecurityException { Preconditions . checkArgument ( id . isPresent ( ) , "user id is required... | As Google API only accepts java . io . File for private key and this method copies private key into local file system . Once Google credential is instantiated it deletes copied private key file . |
36,385 | private static Path getPrivateKey ( FileSystem fs , String privateKeyPath ) throws IOException { Path keyPath = new Path ( privateKeyPath ) ; FileStatus fileStatus = fs . getFileStatus ( keyPath ) ; Preconditions . checkArgument ( USER_READ_PERMISSION_ONLY . equals ( fileStatus . getPermission ( ) ) , "Private key file... | Before retrieving private key it makes sure that original private key s permission is read only on the owner . This is a way to ensure to keep private key private . |
36,386 | public static HttpTransport newTransport ( String proxyUrl , String portStr ) throws NumberFormatException , GeneralSecurityException , IOException { if ( ! StringUtils . isEmpty ( proxyUrl ) && ! StringUtils . isEmpty ( portStr ) ) { return new NetHttpTransport . Builder ( ) . trustCertificates ( GoogleUtils . getCert... | Provides HttpTransport . If both proxyUrl and postStr is defined it provides transport with Proxy . |
36,387 | public EmbeddedGobblin mrMode ( ) throws IOException { this . sysConfigOverrides . put ( ConfigurationKeys . JOB_LAUNCHER_TYPE_KEY , JobLauncherFactory . JobLauncherType . MAPREDUCE . name ( ) ) ; this . builtConfigMap . put ( ConfigurationKeys . FS_URI_KEY , FileSystem . get ( new Configuration ( ) ) . getUri ( ) . to... | Specify job should run in MR mode . |
36,388 | public EmbeddedGobblin sysConfig ( String key , String value ) { this . sysConfigOverrides . put ( key , value ) ; return this ; } | Override a Gobblin system configuration . |
36,389 | public EmbeddedGobblin setConfiguration ( String key , String value ) { this . userConfigMap . put ( key , value ) ; return this ; } | Manually set a key - value pair in the job configuration . |
36,390 | public EmbeddedGobblin setJobTimeout ( String timeout ) { return setJobTimeout ( Period . parse ( timeout ) . getSeconds ( ) , TimeUnit . SECONDS ) ; } | Set the timeout for the Gobblin job execution from ISO - style period . |
36,391 | public EmbeddedGobblin setLaunchTimeout ( String timeout ) { return setLaunchTimeout ( Period . parse ( timeout ) . getSeconds ( ) , TimeUnit . SECONDS ) ; } | Set the timeout for launching the Gobblin job from ISO - style period . |
36,392 | public EmbeddedGobblin setShutdownTimeout ( String timeout ) { return setShutdownTimeout ( Period . parse ( timeout ) . getSeconds ( ) , TimeUnit . SECONDS ) ; } | Set the timeout for shutting down the Gobblin instance driver after the job is done from ISO - style period . |
36,393 | public EmbeddedGobblin useStateStore ( String rootDir ) { this . setConfiguration ( ConfigurationKeys . STATE_STORE_ENABLED , "true" ) ; this . setConfiguration ( ConfigurationKeys . STATE_STORE_ROOT_DIR_KEY , rootDir ) ; return this ; } | Enable state store . |
36,394 | public EmbeddedGobblin enableMetrics ( ) { this . usePlugin ( new GobblinMetricsPlugin . Factory ( ) ) ; this . sysConfig ( ConfigurationKeys . METRICS_ENABLED_KEY , Boolean . toString ( true ) ) ; return this ; } | Enable metrics . Does not start any reporters . |
36,395 | public JobExecutionResult run ( ) throws InterruptedException , TimeoutException , ExecutionException { JobExecutionDriver jobDriver = runAsync ( ) ; return jobDriver . get ( this . jobTimeout . getTimeout ( ) , this . jobTimeout . getTimeUnit ( ) ) ; } | Run the Gobblin job . This call will block until the job is done . |
36,396 | private void loadCoreGobblinJarsToDistributedJars ( ) { distributeJarByClassWithPriority ( State . class , 0 ) ; distributeJarByClassWithPriority ( ConstructState . class , 0 ) ; distributeJarByClassWithPriority ( InstrumentedExtractorBase . class , 0 ) ; distributeJarByClassWithPriority ( MetricContext . class , 0 ) ;... | This returns the set of jars required by a basic Gobblin ingestion job . In general these need to be distributed to workers in a distributed environment . |
36,397 | public synchronized void put ( JobSpec jobSpec ) { Preconditions . checkState ( state ( ) == State . RUNNING , String . format ( "%s is not running." , this . getClass ( ) . getName ( ) ) ) ; Preconditions . checkNotNull ( jobSpec ) ; try { long startTime = System . currentTimeMillis ( ) ; Path jobSpecPath = getPathFor... | Allow user to programmatically add a new JobSpec . The method will materialized the jobSpec into real file . |
36,398 | public synchronized void remove ( URI jobURI ) { Preconditions . checkState ( state ( ) == State . RUNNING , String . format ( "%s is not running." , this . getClass ( ) . getName ( ) ) ) ; try { long startTime = System . currentTimeMillis ( ) ; JobSpec jobSpec = getJobSpec ( jobURI ) ; Path jobSpecPath = getPathForURI... | Allow user to programmatically delete a new JobSpec . This method is designed to be reentrant . |
36,399 | protected Metric serializeValue ( String name , Number value , String ... path ) { return new Metric ( MetricRegistry . name ( name , path ) , value . doubleValue ( ) ) ; } | Converts a single key - value pair into a metric . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.