Columns: name (string, length 12–178) · code_snippet (string, length 8–36.5k) · score (float64, range 3.26–3.68)
flink_Types_ROW_NAMED
/**
 * Returns type information for {@link org.apache.flink.types.Row} with fields of the given
 * types and with given names. A row must not be null.
 *
 * <p>A row is a fixed-length, null-aware composite type for storing multiple values in a
 * deterministic field order. Every field can be null independent of the field's type. The type
 * of row fields cannot be automatically inferred; therefore, it is required to provide type
 * information whenever a row is used.
 *
 * <p>The schema of rows can have up to <code>Integer.MAX_VALUE</code> fields, however, all row
 * instances must strictly adhere to the schema defined by the type info.
 *
 * <p>Example use: {@code ROW_NAMED(new String[]{"name", "number"}, Types.STRING, Types.INT)}.
 *
 * @param fieldNames array of field names
 * @param types array of field types
 */
public static TypeInformation<Row> ROW_NAMED(String[] fieldNames, TypeInformation<?>... types) {
    return new RowTypeInfo(types, fieldNames);
}
3.68
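A minimal usage sketch for ROW_NAMED, assuming the standard Flink classes org.apache.flink.api.common.typeinfo.Types and org.apache.flink.types.Row; the field values are illustrative:

// Declare a two-field row type; field order and names are fixed by the schema.
TypeInformation<Row> rowType =
        Types.ROW_NAMED(new String[] {"name", "number"}, Types.STRING, Types.INT);

// Instances must match the schema, but any individual field may be null.
Row row = Row.of("Alice", 42);
Row withNull = Row.of(null, 7);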
shardingsphere-elasticjob_TracingStorageConverterFactory_findConverter
/**
 * Find {@link TracingStorageConfigurationConverter} for specific storage type.
 *
 * @param storageType storage type
 * @param <T> type of storage
 * @return instance of {@link TracingStorageConfigurationConverter}
 */
@SuppressWarnings("unchecked")
public static <T> Optional<TracingStorageConfigurationConverter<T>> findConverter(final Class<T> storageType) {
    return ShardingSphereServiceLoader.getServiceInstances(TracingStorageConfigurationConverter.class).stream()
            .filter(each -> each.storageType().isAssignableFrom(storageType))
            .map(each -> (TracingStorageConfigurationConverter<T>) each)
            .findFirst();
}
3.68
framework_VDateField_setCurrentResolution
/**
 * Sets the resolution.
 *
 * @param currentResolution
 *            the new resolution
 */
public void setCurrentResolution(R currentResolution) {
    this.currentResolution = currentResolution;
}
3.68
hadoop_TypedBytesOutput_writeListHeader
/**
 * Writes a list header.
 *
 * @throws IOException if an I/O error occurs while writing
 */
public void writeListHeader() throws IOException {
    out.write(Type.LIST.code);
}
3.68
hudi_HoodieBaseFileGroupRecordBuffer_merge
/**
 * Merge two records using the configured record merger.
 *
 * @param older        the older record, if any
 * @param olderInfoMap metadata for the older record, including its schema
 * @param newer        the newer record, if any
 * @param newerInfoMap metadata for the newer record, including its schema
 * @return the merged record, or {@link Option#empty()} if the merge produced no result
 * @throws IOException if merging fails
 */
protected Option<T> merge(Option<T> older, Map<String, Object> olderInfoMap,
                          Option<T> newer, Map<String, Object> newerInfoMap) throws IOException {
    if (!older.isPresent()) {
        return newer;
    }

    Option<Pair<HoodieRecord, Schema>> mergedRecord;
    if (enablePartialMerging) {
        mergedRecord = recordMerger.partialMerge(
            readerContext.constructHoodieRecord(older, olderInfoMap),
            (Schema) olderInfoMap.get(INTERNAL_META_SCHEMA),
            readerContext.constructHoodieRecord(newer, newerInfoMap),
            (Schema) newerInfoMap.get(INTERNAL_META_SCHEMA),
            readerSchema,
            payloadProps);
    } else {
        mergedRecord = recordMerger.merge(
            readerContext.constructHoodieRecord(older, olderInfoMap),
            (Schema) olderInfoMap.get(INTERNAL_META_SCHEMA),
            readerContext.constructHoodieRecord(newer, newerInfoMap),
            (Schema) newerInfoMap.get(INTERNAL_META_SCHEMA),
            payloadProps);
    }

    if (mergedRecord.isPresent()) {
        return Option.ofNullable((T) mergedRecord.get().getLeft().getData());
    }
    return Option.empty();
}
3.68
querydsl_GeometryExpression_intersection
/**
 * Returns a geometric object that represents the Point set intersection of this geometric
 * object with anotherGeometry.
 *
 * @param geometry other geometry
 * @return intersection of this and the other geometry
 */
public GeometryExpression<Geometry> intersection(Expression<? extends Geometry> geometry) {
    return GeometryExpressions.geometryOperation(SpatialOps.INTERSECTION, mixin, geometry);
}
3.68
flink_RawType_restore
/** Restores a raw type from the components of a serialized string representation. */
@SuppressWarnings({"unchecked", "rawtypes"})
public static RawType<?> restore(
        ClassLoader classLoader, String className, String serializerString) {
    try {
        final Class<?> clazz = Class.forName(className, true, classLoader);
        final byte[] bytes = EncodingUtils.decodeBase64ToBytes(serializerString);
        final DataInputDeserializer inputDeserializer = new DataInputDeserializer(bytes);
        final TypeSerializerSnapshot<?> snapshot =
                TypeSerializerSnapshot.readVersionedSnapshot(inputDeserializer, classLoader);
        return (RawType<?>) new RawType(clazz, snapshot.restoreSerializer());
    } catch (Throwable t) {
        throw new ValidationException(
                String.format(
                        "Unable to restore the RAW type of class '%s' with serializer snapshot '%s'.",
                        className, serializerString),
                t);
    }
}
3.68
framework_Table_getColumnHeader
/**
 * Gets the header for the specified column.
 *
 * @param propertyId
 *            the propertyId identifying the column.
 * @return the header for the specified column if it has one.
 */
public String getColumnHeader(Object propertyId) {
    if (getColumnHeaderMode() == ColumnHeaderMode.HIDDEN) {
        return null;
    }

    String header = columnHeaders.get(propertyId);
    if ((header == null
            && getColumnHeaderMode() == ColumnHeaderMode.EXPLICIT_DEFAULTS_ID)
            || getColumnHeaderMode() == ColumnHeaderMode.ID) {
        header = propertyId.toString();
    }

    return header;
}
3.68
hbase_SplitWALManager_addUsedSplitWALWorker
/**
 * When the master restarts, there will be a new splitWorkerAssigner. But if there are splitting
 * WAL tasks running on the region server side, they will not be counted by the new
 * splitWorkerAssigner. Thus we should add the workers of running tasks to the assigner when we
 * load the procedures from MasterProcWALs.
 * @param worker region server which is executing a split WAL task
 */
public void addUsedSplitWALWorker(ServerName worker) {
    splitWorkerAssigner.addUsedWorker(worker);
}
3.68
hibernate-validator_ConstraintCheckIssue_isWarning
/**
 * Determines whether the issue is a warning.
 *
 * @return true if {@link ConstraintCheckIssue#getKind()} equals {@link IssueKind#WARNING}
 */
public boolean isWarning() {
    return IssueKind.WARNING.equals( kind );
}
3.68
morf_H2_matchesProduct
/**
 * @see org.alfasoftware.morf.jdbc.DatabaseType#matchesProduct(java.lang.String)
 */
@Override
public boolean matchesProduct(String product) {
    return product.equalsIgnoreCase("H2");
}
3.68
flink_CheckpointConfig_setMaxConcurrentCheckpoints
/**
 * Sets the maximum number of checkpoint attempts that may be in progress at the same time. If
 * this value is <i>n</i>, then no checkpoints will be triggered while <i>n</i> checkpoint
 * attempts are currently in flight. For the next checkpoint to be triggered, one checkpoint
 * attempt would need to finish or expire.
 *
 * @param maxConcurrentCheckpoints The maximum number of concurrent checkpoint attempts.
 */
public void setMaxConcurrentCheckpoints(int maxConcurrentCheckpoints) {
    if (maxConcurrentCheckpoints < 1) {
        throw new IllegalArgumentException(
                "The maximum number of concurrent attempts must be at least one.");
    }
    configuration.set(
            ExecutionCheckpointingOptions.MAX_CONCURRENT_CHECKPOINTS, maxConcurrentCheckpoints);
}
3.68
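A short usage sketch for setMaxConcurrentCheckpoints in a DataStream job; the checkpoint interval and limit shown are illustrative, not recommended values:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.enableCheckpointing(60_000); // trigger a checkpoint every 60 seconds

// Allow at most two checkpoint attempts to be in flight at the same time.
env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);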
flink_Dispatcher_createDirtyJobResultEntryAsync
/**
 * Creates a dirty entry in the {@link #jobResultStore}, unless the passed {@code
 * hasDirtyJobResultEntry} flag indicates that such an entry already exists.
 *
 * @param executionGraph The {@link AccessExecutionGraph} that is used to generate the entry.
 * @param hasDirtyJobResultEntry {@code true} if a dirty entry already exists, in which case no
 *     new entry is created.
 * @return {@code CompletableFuture} that completes as soon as the entry exists.
 */
private CompletableFuture<Void> createDirtyJobResultEntryAsync(
        AccessExecutionGraph executionGraph, boolean hasDirtyJobResultEntry) {
    if (hasDirtyJobResultEntry) {
        return FutureUtils.completedVoidFuture();
    }

    return jobResultStore.createDirtyResultAsync(
            new JobResultEntry(JobResult.createFrom(executionGraph)));
}
3.68
dubbo_CtClassBuilder_build
/**
 * build CtClass object
 */
public CtClass build(ClassLoader classLoader) throws NotFoundException, CannotCompileException {
    ClassPool pool = new ClassPool(true);
    pool.insertClassPath(new LoaderClassPath(classLoader));
    pool.insertClassPath(new DubboLoaderClassPath());

    // create class
    CtClass ctClass = pool.makeClass(className, pool.get(superClassName));

    // add imported packages
    imports.forEach(pool::importPackage);

    // add implemented interfaces
    for (String iface : ifaces) {
        ctClass.addInterface(pool.get(iface));
    }

    // add constructors
    for (String constructor : constructors) {
        ctClass.addConstructor(CtNewConstructor.make(constructor, ctClass));
    }

    // add fields
    for (String field : fields) {
        ctClass.addField(CtField.make(field, ctClass));
    }

    // add methods
    for (String method : methods) {
        ctClass.addMethod(CtNewMethod.make(method, ctClass));
    }

    return ctClass;
}
3.68
morf_HumanReadableStatementHelper_generateListCriterionString
/**
 * Generates a string describing a criterion made up of a list of sub-criteria and a joining
 * operator, for example the OR or AND operators.
 *
 * @param criterion the criterion with a list of sub-criteria to be composed.
 * @param join the joining operator, including spaces, for example " or ".
 * @param invert whether to use the inverse of each sub-criterion.
 * @return the string.
 */
private static String generateListCriterionString(final Criterion criterion, final String join, final boolean invert) {
    final StringBuilder sb = new StringBuilder();
    boolean comma = false;
    for (Criterion componentCriterion : criterion.getCriteria()) {
        if (comma) {
            sb.append(join);
        } else {
            comma = true;
        }
        // Put brackets around AND and OR inner clauses
        if (Operator.AND.equals(componentCriterion.getOperator())
                || Operator.OR.equals(componentCriterion.getOperator())) {
            sb.append('(').append(generateCriterionString(componentCriterion, invert)).append(')');
        } else {
            sb.append(generateCriterionString(componentCriterion, invert));
        }
    }
    return sb.toString();
}
3.68
framework_GridElement_getHeaderCell
/**
 * Gets header cell element with given row and column index.
 *
 * @param rowIndex
 *            Row index
 * @param colIndex
 *            Column index
 * @return Header cell element with given indices.
 */
public GridCellElement getHeaderCell(int rowIndex, int colIndex) {
    return getSubPart("#header[" + rowIndex + "][" + colIndex + "]")
            .wrap(GridCellElement.class);
}
3.68
framework_ContainerOrderedWrapper_nextItemId
/*
 * Gets the item that is next from the specified item. Don't add a JavaDoc
 * comment here, we use the default documentation from implemented
 * interface.
 */
@Override
public Object nextItemId(Object itemId) {
    if (ordered) {
        return ((Container.Ordered) container).nextItemId(itemId);
    }
    if (itemId == null) {
        return null;
    }
    return next.get(itemId);
}
3.68
hbase_HFileLink_createBackReferenceName
/**
 * Create the back reference name
 */
// package-private for testing
static String createBackReferenceName(final String tableNameStr, final String regionName) {
    return regionName + "." + tableNameStr.replace(TableName.NAMESPACE_DELIM, '=');
}
3.68
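An illustrative sketch of the name transformation above, assuming HBase's namespace delimiter is ':' (TableName.NAMESPACE_DELIM); the table and region names are made up:

// Hypothetical inputs: a namespaced table and a region name.
String tableNameStr = "myNamespace:myTable";
String regionName = "1588230740";

// Mirrors the method body: the namespace delimiter becomes '=' and the region name is prefixed.
String backRef = regionName + "." + tableNameStr.replace(':', '=');
// backRef -> "1588230740.myNamespace=myTable"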
morf_SchemaValidator_checkForValidationErrors
/**
 * Check whether any errors have been added
 */
private void checkForValidationErrors() {
    // check for errors
    if (!validationFailures.isEmpty()) {
        StringBuilder message = new StringBuilder("Schema validation failures:");
        for (String failure : validationFailures) {
            message.append("\n");
            message.append(failure);
        }
        throw new RuntimeException(message.toString());
    }
}
3.68
morf_AbstractSqlDialectTest_testInsertFromSelectWithSomeDefaults
/**
 * Tests that an insert from a select works when some of the defaults are supplied
 */
@Test
public void testInsertFromSelectWithSomeDefaults() {
    InsertStatement stmt = new InsertStatement().into(new TableReference(TEST_TABLE)).from(new TableReference(OTHER_TABLE))
        .withDefaults(new FieldLiteral(20010101).as(DATE_FIELD), new FieldLiteral(0).as(BOOLEAN_FIELD), new NullFieldLiteral().as(BLOB_FIELD));

    // FIXME The default of '' for a charField is WRONG. This should probably be one of NULL or ' '. Not an empty string, which is an invalid character!
    String expectedSql = "INSERT INTO " + tableName(TEST_TABLE)
        + " (id, version, stringField, intField, floatField, dateField, booleanField, charField, blobField, bigIntegerField, clobField) SELECT id, version, stringField, intField, floatField, 20010101 AS dateField, 0 AS booleanField, NULL AS charField, null AS blobField, 12345 AS bigIntegerField, null AS clobField FROM "
        + tableName(OTHER_TABLE);

    List<String> sql = testDialect.convertStatementToSQL(stmt, metadata, SqlDialect.IdTable.withDeterministicName(ID_VALUES_TABLE));
    assertEquals("Insert from select statement with some defaults", ImmutableList.of(expectedSql), sql);
}
3.68
hadoop_AbstractSchedulerPlanFollower_calculateReservationToPlanProportion
/**
 * Resizes reservations based on currently available resources.
 */
private Resource calculateReservationToPlanProportion(
    ResourceCalculator rescCalculator, Resource availablePlanResources,
    Resource totalReservationResources, Resource reservationResources) {
  return Resources.multiply(availablePlanResources,
      Resources.ratio(rescCalculator, reservationResources, totalReservationResources));
}
3.68
framework_VFilterSelect_createTextBox
/**
 * This method will create the TextBox used by the VFilterSelect instance.
 * It is invoked during the Constructor and should only be overridden if a
 * custom TextBox shall be used. The overriding method cannot use any
 * instance variables.
 *
 * @since 7.1.5
 * @return TextBox instance used by this VFilterSelect
 */
protected TextBox createTextBox() {
    return new FilterSelectTextBox();
}
3.68
framework_VScrollTable_enableBrowserIntelligence
/**
 * Enable browser measurement of the table width.
 */
public void enableBrowserIntelligence() {
    hTableContainer.getStyle().clearWidth();
}
3.68
framework_HierarchyRenderer_isElementInHierarchyWidget
/**
 * Decides whether the element was rendered by {@link HierarchyRenderer}.
 *
 * @param element
 *            the element to check
 * @return {@code true} if the element was rendered by a HierarchyRenderer,
 *         {@code false} otherwise.
 */
public static boolean isElementInHierarchyWidget(Element element) {
    Widget w = WidgetUtil.findWidget(element);
    while (w != null) {
        if (w instanceof HierarchyItem) {
            return true;
        }
        w = w.getParent();
    }
    return false;
}
3.68
framework_FocusableFlexTable_addKeyDownHandler
/*
 * (non-Javadoc)
 *
 * @see
 * com.google.gwt.event.dom.client.HasKeyDownHandlers#addKeyDownHandler(
 * com.google.gwt.event.dom.client.KeyDownHandler)
 */
@Override
public HandlerRegistration addKeyDownHandler(KeyDownHandler handler) {
    return addDomHandler(handler, KeyDownEvent.getType());
}
3.68
flink_HsMemoryDataManager_getBuffersInOrder
// Write lock should be acquired before invoking this method.
@Override
public Deque<BufferIndexAndChannel> getBuffersInOrder(
        int subpartitionId, SpillStatus spillStatus, ConsumeStatusWithId consumeStatusWithId) {
    HsSubpartitionMemoryDataManager targetSubpartitionDataManager =
            getSubpartitionMemoryDataManager(subpartitionId);
    return targetSubpartitionDataManager.getBuffersSatisfyStatus(
            spillStatus, consumeStatusWithId);
}
3.68
pulsar_Reflections_classInJarImplementsIface
/**
 * Check if a class implements an interface.
 *
 * @param jar the jar file to search
 * @param fqcn fully qualified class name to search for in jar
 * @param xface the interface to check for
 * @return true if class from jar implements interface xface and false if otherwise
 */
public static boolean classInJarImplementsIface(java.io.File jar, String fqcn, Class xface) {
    boolean ret = false;
    java.net.URLClassLoader loader = null;
    try {
        loader = (URLClassLoader) ClassLoaderUtils.loadJar(jar);
        if (xface.isAssignableFrom(Class.forName(fqcn, false, loader))) {
            ret = true;
        }
    } catch (ClassNotFoundException | NoClassDefFoundError | IOException e) {
        throw new RuntimeException(e);
    } finally {
        if (loader != null) {
            try {
                loader.close();
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        }
    }
    return ret;
}
3.68
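A hedged usage sketch for the check above; the jar path and class name are hypothetical, and Pulsar's Sink interface merely stands in for any interface:

// Does the connector jar contain a class that implements the expected interface?
java.io.File connectorJar = new java.io.File("/tmp/my-connector.jar"); // hypothetical path
boolean implementsSink = Reflections.classInJarImplementsIface(
        connectorJar, "com.example.MySink", org.apache.pulsar.io.core.Sink.class);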
hudi_SparkDataSourceContinuousIngestTool_readConfigFromFileSystem
/**
 * Reads config from the file system.
 *
 * @param jsc {@link JavaSparkContext} instance.
 * @param cfg {@link HoodieRepairTool.Config} instance.
 * @return the {@link TypedProperties} instance.
 */
private TypedProperties readConfigFromFileSystem(JavaSparkContext jsc, Config cfg) {
    return UtilHelpers.readConfig(jsc.hadoopConfiguration(), new Path(cfg.propsFilePath), cfg.configs)
        .getProps(true);
}
3.68
flink_ExistingSavepoint_readBroadcastState
/**
 * Read operator {@code BroadcastState} from a {@code Savepoint} when a custom serializer was
 * used; e.g., a different serializer than the one returned by {@code
 * TypeInformation#createSerializer}.
 *
 * @param uid The uid of the operator.
 * @param name The (unique) name for the state.
 * @param keyTypeInfo The type information for the keys in the state.
 * @param valueTypeInfo The type information for the values in the state.
 * @param keySerializer The type serializer used to write keys into the state.
 * @param valueSerializer The type serializer used to write values into the state.
 * @param <K> The type of keys in state.
 * @param <V> The type of values in state.
 * @return A {@code DataSet} of key-value pairs from state.
 * @throws IOException If the savepoint path is invalid or the uid does not exist.
 */
public <K, V> DataSource<Tuple2<K, V>> readBroadcastState(
        String uid,
        String name,
        TypeInformation<K> keyTypeInfo,
        TypeInformation<V> valueTypeInfo,
        TypeSerializer<K> keySerializer,
        TypeSerializer<V> valueSerializer)
        throws IOException {

    OperatorState operatorState = metadata.getOperatorState(uid);
    MapStateDescriptor<K, V> descriptor =
            new MapStateDescriptor<>(name, keySerializer, valueSerializer);
    BroadcastStateInputFormat<K, V> inputFormat =
            new BroadcastStateInputFormat<>(
                    operatorState, env.getConfiguration(), stateBackend, descriptor);
    return env.createInput(inputFormat, new TupleTypeInfo<>(keyTypeInfo, valueTypeInfo));
}
3.68
hudi_CollectionUtils_copy
/**
 * Makes a copy of provided {@link Properties} object
 */
public static Properties copy(Properties props) {
    Properties copy = new Properties();
    copy.putAll(props);
    return copy;
}
3.68
flink_OggJsonFormatFactory_validateDecodingFormatOptions
/** Validator for ogg decoding format. */
private static void validateDecodingFormatOptions(ReadableConfig tableOptions) {
    JsonFormatOptionsUtil.validateDecodingFormatOptions(tableOptions);
}
3.68
hbase_WALProcedureStore_getWALDir
// ==========================================================================
//  FileSystem Log Files helpers
// ==========================================================================
public Path getWALDir() {
    return this.walDir;
}
3.68
flink_ArrowUtils_toArrowSchema
/** Returns the Arrow schema of the specified type. */
public static Schema toArrowSchema(RowType rowType) {
    Collection<Field> fields =
            rowType.getFields().stream()
                    .map(f -> ArrowUtils.toArrowField(f.getName(), f.getType()))
                    .collect(Collectors.toCollection(ArrayList::new));
    return new Schema(fields);
}
3.68
morf_DatabaseMetaDataProvider_viewExists
/**
 * @see org.alfasoftware.morf.metadata.Schema#viewExists(java.lang.String)
 */
@Override
public boolean viewExists(String viewName) {
    return viewNames.get().containsKey(named(viewName));
}
3.68
hadoop_DelegatingSSLSocketFactory_bindToOpenSSLProvider
/**
 * Bind to the OpenSSL provider via wildfly.
 * This MUST be the only place where wildfly classes are referenced,
 * ensuring that any linkage problems only surface here where they may
 * be caught by the initialization code.
 */
private void bindToOpenSSLProvider()
    throws NoSuchAlgorithmException, KeyManagementException {
  if (!openSSLProviderRegistered) {
    LOG.debug("Attempting to register OpenSSL provider");
    org.wildfly.openssl.OpenSSLProvider.register();
    openSSLProviderRegistered = true;
  }
  // Strong reference needs to be kept to logger until initialization of
  // SSLContext finished (see HADOOP-16174):
  java.util.logging.Logger logger = java.util.logging.Logger.getLogger(
      "org.wildfly.openssl.SSL");
  Level originalLevel = logger.getLevel();
  try {
    logger.setLevel(Level.WARNING);
    ctx = SSLContext.getInstance("openssl.TLS");
    ctx.init(null, null, null);
  } finally {
    logger.setLevel(originalLevel);
  }
}
3.68
flink_KubernetesStateHandleStore_replace
/**
 * Replaces a state handle in ConfigMap and discards the old state handle. We do not lock the
 * resource version and then replace it in Kubernetes. Since the ConfigMap is periodically
 * updated by the leader, the resource version changes very fast. We use a "check-existence and
 * update" transactional operation instead.
 *
 * @param key Key in ConfigMap
 * @param resourceVersion resource version when checking existence via {@link #exists}.
 * @param state State to be added
 * @throws NotExistException if the name does not exist
 * @throws PossibleInconsistentStateException if a failure occurred during the update operation.
 *     It's unclear whether the operation actually succeeded or not. No state was discarded. The
 *     method's caller should handle this case properly.
 * @throws Exception if persisting state or writing state handle failed
 */
@Override
public void replace(String key, StringResourceVersion resourceVersion, T state) throws Exception {
    checkNotNull(key, "Key in ConfigMap.");
    checkNotNull(state, "State.");

    final RetrievableStateHandle<T> newStateHandle = storage.store(state);

    final byte[] serializedStateHandle =
            serializeOrDiscard(new StateHandleWithDeleteMarker<>(newStateHandle));

    // initialize flags to serve the failure case
    boolean discardOldState = false;
    boolean discardNewState = true;

    // We don't want to greedily pull the old state handle as we have to do that anyway in
    // replaceEntry method for check of delete markers.
    final AtomicReference<RetrievableStateHandle<T>> oldStateHandleRef = new AtomicReference<>();

    try {
        final boolean success =
                updateConfigMap(
                                cm -> {
                                    try {
                                        return replaceEntry(
                                                cm, key, serializedStateHandle, oldStateHandleRef);
                                    } catch (NotExistException e) {
                                        throw new CompletionException(e);
                                    }
                                })
                        .get();
        // swap subject for deletion in case of success
        discardOldState = success;
        discardNewState = !success;
    } catch (Exception ex) {
        final Optional<PossibleInconsistentStateException> possibleInconsistentStateException =
                ExceptionUtils.findThrowable(ex, PossibleInconsistentStateException.class);
        if (possibleInconsistentStateException.isPresent()) {
            // it's unclear whether the state handle metadata was written to the ConfigMap -
            // hence, we don't discard any data
            discardNewState = false;
            throw possibleInconsistentStateException.get();
        }
        throw ExceptionUtils.findThrowable(ex, NotExistException.class).orElseThrow(() -> ex);
    } finally {
        if (discardNewState) {
            newStateHandle.discardState();
        }
        if (discardOldState) {
            Objects.requireNonNull(
                            oldStateHandleRef.get(),
                            "state handle should have been set on success")
                    .discardState();
        }
    }
}
3.68
flink_UpsertTestFileUtil_readRecords
/**
 * Reads records that were written using the {@link UpsertTestSinkWriter} from the given
 * InputStream.
 *
 * @param bis The BufferedInputStream to read from
 * @return Map containing the read ImmutableByteArrayWrapper key-value pairs
 * @throws IOException if the stream is corrupted or an I/O error occurs while reading
 */
private static Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> readRecords(
        BufferedInputStream bis) throws IOException {
    checkNotNull(bis);
    Map<ImmutableByteArrayWrapper, ImmutableByteArrayWrapper> records = new HashMap<>();
    int magicByte;
    while ((magicByte = bis.read()) != -1) {
        if (magicByte != MAGIC_BYTE) {
            throw new IOException("Data was serialized incorrectly or is corrupted.");
        }
        int keyLength = bis.read();
        byte[] key = new byte[keyLength];
        bis.read(key);
        int valueLength = bis.read();
        byte[] value = new byte[valueLength];
        bis.read(value);
        records.put(new ImmutableByteArrayWrapper(key), new ImmutableByteArrayWrapper(value));
    }
    return records;
}
3.68
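The reader above implies a per-record layout: one magic byte, a single length byte for the key, the key bytes, a single length byte for the value, then the value bytes. A minimal writer sketch under that assumption (the magic byte value and the one-byte length limit are inferred from the reader, not confirmed against UpsertTestSinkWriter):

// Writes one record in the layout the reader expects; keys and values must fit
// in one unsigned length byte (<= 255 bytes) for this sketch to round-trip.
static void writeRecord(java.io.OutputStream out, byte[] key, byte[] value, int magicByte)
        throws java.io.IOException {
    out.write(magicByte);    // marker byte checked by the reader
    out.write(key.length);   // single length byte
    out.write(key);
    out.write(value.length); // single length byte
    out.write(value);
}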
framework_WindowMoveListener_setup
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
 * VaadinRequest)
 */
@Override
protected void setup(VaadinRequest request) {
    Window w = new Window("Caption");
    w.setId("testwindow");
    w.setHeight("100px");
    w.setWidth("100px");
    w.setPositionX(100);
    w.setPositionY(100);
    addWindow(w);

    Button b = new Button();
    b.setId("testbutton");
    addComponent(b);
    b.addClickListener(event -> {
        for (Window window : getWindows()) {
            window.setPositionX(100);
            window.setPositionY(100);
        }
    });
}
3.68
flink_AbstractStreamOperator_snapshotState
/**
 * Stream operators with state that want to participate in a snapshot need to override this
 * hook method.
 *
 * @param context context that provides information and means required for taking a snapshot
 */
@Override
public void snapshotState(StateSnapshotContext context) throws Exception {}
3.68
hadoop_ResourceUsage_getUsed
/*
 * Used
 */
public Resource getUsed() {
    return getUsed(NL);
}
3.68
graphhopper_CHStorage_toShortcutPointer
/**
 * To use the shortcut getters/setters you need to convert shortcut IDs to a shortcut pointer
 * first
 */
public long toShortcutPointer(int shortcut) {
    assert shortcut < shortcutCount : "shortcut " + shortcut + " not in bounds [0, " + shortcutCount + "[";
    return (long) shortcut * shortcutEntryBytes;
}
3.68
framework_WebBrowser_isAndroid
/**
 * Tests if the browser is run on Android.
 *
 * @return true if run on Android, false if the user is not using Android or
 *         if no information on the browser is present
 */
public boolean isAndroid() {
    return browserDetails.isAndroid();
}
3.68
hadoop_QueueResourceQuotas_getEffectiveMaxResource
/*
 * Effective Maximum Resource
 */
public Resource getEffectiveMaxResource() {
    return getEffectiveMaxResource(NL);
}
3.68
hbase_MultiTableInputFormatBase_createRecordReader
/**
 * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
 * @param split The split to work with.
 * @param context The current context.
 * @return The newly created record reader.
 * @throws IOException When creating the reader fails.
 * @throws InterruptedException when record reader initialization fails
 * @see InputFormat#createRecordReader(InputSplit, TaskAttemptContext)
 */
@Override
public RecordReader<ImmutableBytesWritable, Result> createRecordReader(InputSplit split,
    TaskAttemptContext context) throws IOException, InterruptedException {
    TableSplit tSplit = (TableSplit) split;
    LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength()));

    if (tSplit.getTable() == null) {
        throw new IOException("Cannot create a record reader because of a"
            + " previous error. Please look at the previous log lines from"
            + " the task's full log for more details.");
    }
    final Connection connection = ConnectionFactory.createConnection(context.getConfiguration());
    Table table = connection.getTable(tSplit.getTable());

    if (this.tableRecordReader == null) {
        this.tableRecordReader = new TableRecordReader();
    }
    final TableRecordReader trr = this.tableRecordReader;

    try {
        Scan sc = tSplit.getScan();
        sc.withStartRow(tSplit.getStartRow());
        sc.withStopRow(tSplit.getEndRow());
        trr.setScan(sc);
        trr.setTable(table);
        return new RecordReader<ImmutableBytesWritable, Result>() {

            @Override
            public void close() throws IOException {
                trr.close();
                connection.close();
            }

            @Override
            public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException {
                return trr.getCurrentKey();
            }

            @Override
            public Result getCurrentValue() throws IOException, InterruptedException {
                return trr.getCurrentValue();
            }

            @Override
            public float getProgress() throws IOException, InterruptedException {
                return trr.getProgress();
            }

            @Override
            public void initialize(InputSplit inputsplit, TaskAttemptContext context)
                throws IOException, InterruptedException {
                trr.initialize(inputsplit, context);
            }

            @Override
            public boolean nextKeyValue() throws IOException, InterruptedException {
                return trr.nextKeyValue();
            }
        };
    } catch (IOException ioe) {
        // If there is an exception make sure that all
        // resources are closed and released.
        trr.close();
        connection.close();
        throw ioe;
    }
}
3.68
hadoop_DockerCommandExecutor_isStoppable
/**
 * Is the container in a stoppable state?
 *
 * @param containerStatus the container's {@link DockerContainerStatus}.
 * @return is the container in a stoppable state.
 */
public static boolean isStoppable(DockerContainerStatus containerStatus) {
    if (containerStatus.equals(DockerContainerStatus.RUNNING)
        || containerStatus.equals(DockerContainerStatus.RESTARTING)) {
        return true;
    }
    return false;
}
3.68
flink_CliFrontend_stop
/**
 * Executes the STOP action.
 *
 * @param args Command line arguments for the stop action.
 */
protected void stop(String[] args) throws Exception {
    LOG.info("Running 'stop-with-savepoint' command.");

    final Options commandOptions = CliFrontendParser.getStopCommandOptions();
    final CommandLine commandLine = getCommandLine(commandOptions, args, false);

    final StopOptions stopOptions = new StopOptions(commandLine);
    if (stopOptions.isPrintHelp()) {
        CliFrontendParser.printHelpForStop(customCommandLines);
        return;
    }

    final String[] cleanedArgs = stopOptions.getArgs();

    final String targetDirectory =
            stopOptions.hasSavepointFlag() && cleanedArgs.length > 0
                    ? stopOptions.getTargetDirectory()
                    : null; // the default savepoint location is going to be used in this case.

    final JobID jobId =
            cleanedArgs.length != 0
                    ? parseJobId(cleanedArgs[0])
                    : parseJobId(stopOptions.getTargetDirectory());

    final boolean advanceToEndOfEventTime = stopOptions.shouldAdvanceToEndOfEventTime();
    final SavepointFormatType formatType = stopOptions.getFormatType();

    logAndSysout(
            (advanceToEndOfEventTime ? "Draining job " : "Suspending job ")
                    + "\"" + jobId + "\" with a " + formatType + " savepoint.");

    final CustomCommandLine activeCommandLine = validateAndGetActiveCommandLine(commandLine);
    runClusterAction(
            activeCommandLine,
            commandLine,
            (clusterClient, effectiveConfiguration) -> {
                final String savepointPath;
                try {
                    savepointPath =
                            clusterClient
                                    .stopWithSavepoint(
                                            jobId,
                                            advanceToEndOfEventTime,
                                            targetDirectory,
                                            formatType)
                                    .get(
                                            getClientTimeout(effectiveConfiguration).toMillis(),
                                            TimeUnit.MILLISECONDS);
                } catch (Exception e) {
                    throw new FlinkException(
                            "Could not stop with a savepoint job \"" + jobId + "\".", e);
                }
                logAndSysout("Savepoint completed. Path: " + savepointPath);
            });
}
3.68
flink_CheckpointConfig_isExternalizedCheckpointsEnabled
/**
 * Returns whether checkpoints should be persisted externally.
 *
 * @return <code>true</code> if checkpoints should be externalized.
 */
@PublicEvolving
public boolean isExternalizedCheckpointsEnabled() {
    return getExternalizedCheckpointCleanup()
            != ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS;
}
3.68
flink_WikipediaEditEvent_getTimestamp
/**
 * Returns the timestamp when this event arrived at the source.
 *
 * @return The timestamp assigned at the source.
 */
public long getTimestamp() {
    return timestamp;
}
3.68
rocketmq-connect_ClusterConfigState_targetState
/**
 * Get the target state of the connector.
 *
 * @param connector the connector name
 * @return the connector's target state
 */
public TargetState targetState(String connector) {
    return connectorTargetStates.get(connector);
}
3.68
pulsar_FieldParser_stringToSet
/**
 * Converts comma separated string to Set.
 *
 * @param <T>
 *            type of set
 * @param val
 *            comma separated values.
 * @param type
 *            target type of each element.
 * @return The converted set with type {@code <T>}.
 */
public static <T> Set<T> stringToSet(String val, Class<T> type) {
    if (val == null) {
        return null;
    }
    String[] tokens = trim(val).split(",");
    return Arrays.stream(tokens).map(t -> {
        return convert(trim(t), type);
    }).collect(Collectors.toCollection(LinkedHashSet::new));
}
3.68
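A hedged usage sketch, assuming FieldParser.convert can parse the element type (Integer here); the input string is illustrative:

// Tokens are trimmed and insertion order is preserved (LinkedHashSet).
Set<Integer> ports = FieldParser.stringToSet("8080, 8081,8082", Integer.class);
// ports -> [8080, 8081, 8082]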
flink_PojoSerializer_createRegisteredSubclassTags
/**
 * Builds map of registered subclasses to their class tags. Class tags will be integers starting
 * from 0, assigned incrementally with the order of provided subclasses.
 */
private static LinkedHashMap<Class<?>, Integer> createRegisteredSubclassTags(
        LinkedHashSet<Class<?>> registeredSubclasses) {
    final LinkedHashMap<Class<?>, Integer> classToTag = new LinkedHashMap<>();

    int id = 0;
    for (Class<?> registeredClass : registeredSubclasses) {
        classToTag.put(registeredClass, id);
        id++;
    }

    return classToTag;
}
3.68
flink_JoinInputSideSpec_hasUniqueKey
/** Returns true if the input has unique key, otherwise false. */
public boolean hasUniqueKey() {
    return inputSideHasUniqueKey;
}
3.68
starts_ChecksumUtil_makeCheckSumMap
/**
 * This method creates the checksum map only for tests that are affected by changes.
 *
 * @param loader   The classloader from which to find .class files
 * @param testDeps The transitive closure of dependencies for each test
 * @param affected The set of tests that are affected by the changes
 * @return The checksum map
 */
public static Map<String, Set<RegData>> makeCheckSumMap(ClassLoader loader,
        Map<String, Set<String>> testDeps, Set<String> affected) {
    Map<String, Set<RegData>> checksums = new HashMap<>();
    ChecksumUtil checksumUtil = new ChecksumUtil(true);
    for (String test : affected) {
        checksums.put(test, new HashSet<RegData>());
        URL url = loader.getResource(toClassName(test));
        checksums.get(test).add(checksumUtil.computeChecksumRegData(url));
        long start = System.currentTimeMillis();
        for (String dep : testDeps.get(test)) {
            String className = toClassName(dep);
            if (!Types.isIgnorableInternalName(className)) {
                url = loader.getResource(className);
                if (url != null) {
                    if (!isWellKnownUrl(url.toExternalForm())) {
                        checksums.get(test).add(checksumUtil.computeChecksumRegData(url));
                    }
                } else {
                    // Known benign cases where this can happen: (i) dep is from a shaded jar which is
                    // itself on the classpath; (ii) dep is from an optional jar dependency of a direct
                    // jar dependency (e.g., users of joda-time-*.jar do not necessarily depend on
                    // classes from joda-convert-8.jar)
                    LOGGER.log(Level.FINEST, "@@LoadedNullURLForDep: " + dep);
                }
            }
        }
        long end = System.currentTimeMillis();
        LOGGER.log(Level.FINEST, "LOADED RESOURCES: " + (end - start) + MILLISECOND);
    }
    return checksums;
}
3.68
framework_MenuTooltip_setup
/*
 * (non-Javadoc)
 *
 * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server.
 * VaadinRequest)
 */
@Override
protected void setup(VaadinRequest request) {
    addComponent(buildMenu());
    getTooltipConfiguration().setOpenDelay(2000);
}
3.68
framework_VColorPickerGradient_getCursorX
/**
 * Returns the latest x-coordinate for pressed-down mouse cursor.
 *
 * @return the latest x-coordinate
 */
public int getCursorX() {
    return cursorX;
}
3.68
hadoop_HSAuditLogger_logSuccess
/**
 * Create a readable and parseable audit log string for a successful event.
 *
 * @param user
 *          User who made the service request.
 * @param operation
 *          Operation requested by the user.
 * @param target
 *          The target on which the operation is being performed.
 *
 *          <br>
 *          <br>
 *          Note that the {@link HSAuditLogger} uses tabs ('\t') as a key-val
 *          delimiter and hence the value fields should not contain tabs
 *          ('\t').
 */
public static void logSuccess(String user, String operation, String target) {
    if (LOG.isInfoEnabled()) {
        LOG.info(createSuccessLog(user, operation, target));
    }
}
3.68
framework_VDragEvent_getTransferable
/**
 * Returns the VTransferable instance that represents the original dragged
 * element.
 *
 * @return the transferable instance
 */
public VTransferable getTransferable() {
    return transferable;
}
3.68
hbase_UnsafeAccess_putShort
// APIs to add primitives to BBs
/**
 * Put a short value out to the specified BB position in big-endian format.
 * @param buf the byte buffer
 * @param offset position in the buffer
 * @param val short to write out
 * @return incremented offset
 */
public static int putShort(ByteBuffer buf, int offset, short val) {
    if (LITTLE_ENDIAN) {
        val = Short.reverseBytes(val);
    }
    if (buf.isDirect()) {
        HBasePlatformDependent.putShort(directBufferAddress(buf) + offset, val);
    } else {
        HBasePlatformDependent.putShort(buf.array(),
            BYTE_ARRAY_BASE_OFFSET + buf.arrayOffset() + offset, val);
    }
    return offset + Bytes.SIZEOF_SHORT;
}
3.68
querydsl_ExpressionUtils_toExpression
/**
 * Converts the given object to an Expression
 *
 * <p>Casts expressions and wraps everything else into constants</p>
 *
 * @param o object to convert
 * @return converted argument
 */
public static Expression<?> toExpression(Object o) {
    if (o instanceof Expression) {
        return (Expression<?>) o;
    } else {
        return ConstantImpl.create(o);
    }
}
3.68
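A brief usage sketch of the conversion above; the values are illustrative:

// Non-expressions are wrapped as constants; expressions pass through unchanged.
Expression<?> constant = ExpressionUtils.toExpression("hello"); // -> Constant of "hello"
Expression<?> same = ExpressionUtils.toExpression(constant);    // -> returned as-is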
hadoop_Exec_getOutput
/**
 * Get every line consumed from the input.
 *
 * @return Every line consumed from the input
 */
public List<String> getOutput() {
    return output;
}
3.68
hadoop_AzureNativeFileSystemStore_getInstrumentedContext
/**
 * Creates a new OperationContext for the Azure Storage operation that has
 * listeners hooked to it that will update the metrics for this file system.
 *
 * @param bindConcurrentOOBIo
 *          - bind to intercept send request call backs to handle OOB I/O.
 *
 * @return The OperationContext object to use.
 */
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {

    OperationContext operationContext = new OperationContext();

    // Set User-Agent
    operationContext.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
        @Override
        public void eventOccurred(SendingRequestEvent eventArg) {
            HttpURLConnection connection = (HttpURLConnection) eventArg.getConnectionObject();
            String userAgentInfo = String.format(Utility.LOCALE_US, "WASB/%s (%s) %s",
                VersionInfo.getVersion(), userAgentId, BaseRequest.getUserAgent());
            connection.setRequestProperty(Constants.HeaderConstants.USER_AGENT, userAgentInfo);
        }
    });

    if (selfThrottlingEnabled) {
        SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
            selfThrottlingWriteFactor);
    } else if (autoThrottlingEnabled) {
        ClientThrottlingIntercept.hook(operationContext);
    }

    if (bandwidthGaugeUpdater != null) {
        // bandwidthGaugeUpdater is null when we config to skip azure metrics
        ResponseReceivedMetricUpdater.hook(operationContext, instrumentation, bandwidthGaugeUpdater);
    }

    // Bind operation context to receive send request callbacks on this operation.
    // If reads concurrent to OOB writes are allowed, the interception will reset
    // the conditional header on all Azure blob storage read requests.
    if (bindConcurrentOOBIo) {
        SendRequestIntercept.bind(operationContext);
    }

    if (testHookOperationContext != null) {
        operationContext = testHookOperationContext.modifyOperationContext(operationContext);
    }

    ErrorMetricUpdater.hook(operationContext, instrumentation);

    // Return the operation context.
    return operationContext;
}
3.68
flink_HashPartition_getNumOccupiedMemorySegments
/**
 * Gets the number of memory segments used by this partition, which includes build side memory
 * buffers and overflow memory segments.
 *
 * @return The number of occupied memory segments.
 */
public int getNumOccupiedMemorySegments() {
    // either the number of memory segments, or one for spilling
    final int numPartitionBuffers =
            this.partitionBuffers != null
                    ? this.partitionBuffers.length
                    : this.buildSideWriteBuffer.getNumOccupiedMemorySegments();
    return numPartitionBuffers + numOverflowSegments;
}
3.68
flink_JoinOperator_projectTuple20
/**
 * Projects a pair of joined elements to a {@link Tuple} with the previously selected
 * fields. Requires the classes of the fields of the resulting tuples.
 *
 * @return The projected data set.
 * @see Tuple
 * @see DataSet
 */
public <T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>
        ProjectJoin<I1, I2, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>
        projectTuple20() {
    TypeInformation<?>[] fTypes = extractFieldTypes(fieldIndexes);
    TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>> tType =
            new TupleTypeInfo<Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(fTypes);

    return new ProjectJoin<I1, I2, Tuple20<T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15, T16, T17, T18, T19>>(
            this.ds1, this.ds2, this.keys1, this.keys2, this.hint, this.fieldIndexes, this.isFieldInFirst, tType, this);
}
3.68
hudi_HoodieBackedTableMetadataWriter_getFileNameToSizeMap
// Returns a map of filenames mapped to their lengths
Map<String, Long> getFileNameToSizeMap() {
    return filenameToSizeMap;
}
3.68
pulsar_LoadSimulationController_handleStop
// Handle the command line arguments associated with the stop command.
private void handleStop(final ShellArguments arguments) throws Exception {
    final List<String> commandArguments = arguments.commandArguments;
    // Stop expects three application arguments: tenant name, namespace
    // name, and topic name.
    if (checkAppArgs(commandArguments.size() - 1, 3)) {
        final String topic = makeTopic(commandArguments.get(1), commandArguments.get(2),
                commandArguments.get(3));
        for (DataOutputStream outputStream : outputStreams) {
            outputStream.write(LoadSimulationClient.STOP_COMMAND);
            outputStream.writeUTF(topic);
            outputStream.flush();
        }
    }
}
3.68
flink_ZooKeeperStateHandleStore_release
/**
 * Releases the lock from the node under the given ZooKeeper path. If no lock exists, then
 * nothing happens.
 *
 * @param pathInZooKeeper Path describing the ZooKeeper node
 * @throws Exception if the delete operation of the lock node fails
 */
@Override
public void release(String pathInZooKeeper) throws Exception {
    final String path = normalizePath(pathInZooKeeper);
    final String lockPath = getInstanceLockPath(path);
    try {
        deleteIfExists(lockPath);
    } catch (Exception e) {
        throw new Exception("Could not release the lock: " + lockPath + '.', e);
    }
}
3.68
hbase_DefaultMemStore_main
/**
 * Code to help figure if our approximation of object heap sizes is close enough. See hbase-900.
 * Fills memstores then waits so user can heap dump and bring up resultant hprof in something like
 * jprofiler which allows you to get 'deep size' on objects.
 * @param args main args
 */
public static void main(String[] args) {
    RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
    LOG.info("vmName=" + runtime.getVmName() + ", vmVendor=" + runtime.getVmVendor()
        + ", vmVersion=" + runtime.getVmVersion());
    LOG.info("vmInputArguments=" + runtime.getInputArguments());
    DefaultMemStore memstore1 = new DefaultMemStore();
    // TODO: x32 vs x64
    final int count = 10000;
    byte[] fam = Bytes.toBytes("col");
    byte[] qf = Bytes.toBytes("umn");
    byte[] empty = new byte[0];
    MemStoreSizing memStoreSizing = new NonThreadSafeMemStoreSizing();
    for (int i = 0; i < count; i++) {
        // Give each its own ts
        memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memStoreSizing);
    }
    LOG.info("memstore1 estimated size={}",
        memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
    for (int i = 0; i < count; i++) {
        memstore1.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, empty), memStoreSizing);
    }
    LOG.info("memstore1 estimated size (2nd loading of same data)={}",
        memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
    // Make a variably sized memstore.
    DefaultMemStore memstore2 = new DefaultMemStore();
    memStoreSizing = new NonThreadSafeMemStoreSizing();
    for (int i = 0; i < count; i++) {
        memstore2.add(new KeyValue(Bytes.toBytes(i), fam, qf, i, new byte[i]), memStoreSizing);
    }
    LOG.info("memstore2 estimated size={}",
        memStoreSizing.getMemStoreSize().getDataSize() + memStoreSizing.getMemStoreSize().getHeapSize());
    final int seconds = 30;
    LOG.info("Waiting " + seconds + " seconds while heap dump is taken");
    LOG.info("Exiting.");
}
3.68
hbase_RegionCoprocessorHost_preMemStoreCompactionCompact
/**
 * Invoked before compacting memstore.
 */
public InternalScanner preMemStoreCompactionCompact(HStore store, InternalScanner scanner)
    throws IOException {
    if (coprocEnvironments.isEmpty()) {
        return scanner;
    }
    return execOperationWithResult(
        new ObserverOperationWithResult<RegionObserver, InternalScanner>(regionObserverGetter, scanner) {
            @Override
            public InternalScanner call(RegionObserver observer) throws IOException {
                return observer.preMemStoreCompactionCompact(this, store, getResult());
            }
        });
}
3.68
hadoop_SnappyDecompressor_needsInput
/**
 * Returns true if the input data buffer is empty and
 * {@link #setInput(byte[], int, int)} should be called to
 * provide more input.
 *
 * @return <code>true</code> if the input data buffer is empty and
 *         {@link #setInput(byte[], int, int)} should be called in
 *         order to provide more input.
 */
@Override
public boolean needsInput() {
    // Consume remaining compressed data?
    if (uncompressedDirectBuf.remaining() > 0) {
        return false;
    }

    // Check if snappy has consumed all input
    if (compressedDirectBufLen <= 0) {
        // Check if we have consumed all user-input
        if (userBufLen <= 0) {
            return true;
        } else {
            setInputFromSavedData();
        }
    }
    return false;
}
3.68
framework_DragStartEvent_getComponent
/**
 * Returns the drag source component where the dragstart event occurred.
 *
 * @return Component which is dragged.
 */
@Override
@SuppressWarnings("unchecked")
public T getComponent() {
    return (T) super.getComponent();
}
3.68
dubbo_CollectionUtils_ofSet
/**
 * Converts one or more values into a read-only {@link LinkedHashSet}.
 *
 * @param values one or more values
 * @param <T> the type of <code>values</code>
 * @return read-only {@link Set}
 */
public static <T> Set<T> ofSet(T... values) {
    int size = values == null ? 0 : values.length;
    if (size < 1) {
        return emptySet();
    }

    float loadFactor = 1f / ((size + 1) * 1.0f);

    if (loadFactor > 0.75f) {
        loadFactor = 0.75f;
    }

    Set<T> elements = new LinkedHashSet<>(size, loadFactor);
    for (int i = 0; i < size; i++) {
        elements.add(values[i]);
    }
    return unmodifiableSet(elements);
}
3.68
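A brief usage sketch of the helper above; the values are illustrative:

// Duplicates collapse, insertion order is preserved, and the result is unmodifiable.
Set<String> set = CollectionUtils.ofSet("a", "b", "a");
// set -> [a, b]; calling set.add("c") would throw UnsupportedOperationException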
framework_TouchScrollDelegate_enableTouchScrolling
/**
 * Makes the given elements scrollable, either natively or by using a
 * TouchScrollDelegate, depending on platform capabilities.
 *
 * @param widget
 *            The widget that contains scrollable elements
 * @param scrollables
 *            The elements inside the widget that should be scrollable
 * @return A scroll handler for the given widget.
 */
public static TouchScrollHandler enableTouchScrolling(Widget widget,
        Element... scrollables) {
    TouchScrollHandler handler = GWT.create(TouchScrollHandler.class);
    handler.init(widget, scrollables);
    return handler;
}
3.68
flink_TimestampData_getMillisecond
/** Returns the number of milliseconds since {@code 1970-01-01 00:00:00}. */
public long getMillisecond() {
    return millisecond;
}
3.68
framework_CommErrorEmulatorUI_setup
// Server exceptions will occur in this test as we are writing the response
// here and not letting the servlet write it
@Override
protected void setup(VaadinRequest request) {
    String transport = request.getParameter("transport");
    if ("websocket".equalsIgnoreCase(transport)) {
        log("Using websocket");
    } else if ("websocket-xhr".equalsIgnoreCase(transport)) {
        log("Using websocket for push only");
    } else if ("long-polling".equalsIgnoreCase(transport)) {
        log("Using long-polling");
    } else {
        log("Using XHR");
    }
    getLayout().setSpacing(true);
    addComponent(createConfigPanel());
    addComponent(createServerConfigPanel());
    addComponent(new Button("Say hello", event -> log("Hello")));
}
3.68
flink_TestSignalHandler_handle
/**
 * Handle an incoming signal.
 *
 * @param signal The incoming signal
 */
@Override
public void handle(Signal signal) {
    LOG.warn(
            "RECEIVED SIGNAL {}: SIG{}. Shutting down as requested.",
            signal.getNumber(),
            signal.getName());
    prevHandler.handle(signal);
}
3.68
flink_CompositeType_hasField
/** Returns true when this type has a composite field with the given name. */
@PublicEvolving
public boolean hasField(String fieldName) {
    return getFieldIndex(fieldName) >= 0;
}
3.68