| language (1 distinct value) | repo (60 distinct values) | path (22–294 chars) | class_span (dict) | source (13–1.16M chars) | target (1–113 chars) |
|---|---|---|---|---|---|
java
|
apache__avro
|
lang/java/ipc/src/test/java/org/apache/avro/generic/TestBuilderCopy.java
|
{
"start": 1137,
"end": 1893
}
|
class ____ {
@Test
void builderCopy() {
StringablesRecord.Builder builder = StringablesRecord.newBuilder();
builder.setValue(new BigDecimal("1314.11"));
HashMap<String, BigDecimal> mapWithBigDecimalElements = new HashMap<>();
mapWithBigDecimalElements.put("testElement", new BigDecimal("220.11"));
builder.setMapWithBigDecimalElements(mapWithBigDecimalElements);
HashMap<BigInteger, String> mapWithBigIntKeys = new HashMap<>();
mapWithBigIntKeys.put(BigInteger.ONE, "testKey");
builder.setMapWithBigIntKeys(mapWithBigIntKeys);
StringablesRecord original = builder.build();
StringablesRecord duplicate = StringablesRecord.newBuilder(original).build();
assertEquals(duplicate, original);
}
}
|
TestBuilderCopy
|
java
|
apache__flink
|
flink-core/src/main/java/org/apache/flink/util/Visitable.java
|
{
"start": 1579,
"end": 2493
}
|
interface ____<T extends Visitable<T>> {
/**
* Contains the logic to invoke the visitor and continue the traversal. Typically invokes the
* pre-visit method of the visitor, then sends the visitor to the children (or predecessors) and
* then invokes the post-visit method.
*
* <p>A typical code example is the following:
*
* <pre>{@code
* public void accept(Visitor<Operator> visitor) {
* boolean descend = visitor.preVisit(this);
* if (descend) {
* if (this.input != null) {
* this.input.accept(visitor);
* }
* visitor.postVisit(this);
* }
* }
* }</pre>
*
* @param visitor The visitor to be called with this object as the parameter.
* @see Visitor#preVisit(Visitable)
* @see Visitor#postVisit(Visitable)
*/
void accept(Visitor<T> visitor);
}
|
Visitable
|
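The Javadoc in this record fully specifies the traversal contract, so a toy implementation is easy to sketch. `Node` and `NodeVisitor` below are hypothetical names, not Flink's; only the `preVisit`/descend/`postVisit` shape is taken from the record.

```java
import java.util.List;

interface NodeVisitor {
    boolean preVisit(Node node);  // return false to skip the subtree
    void postVisit(Node node);
}

class Node {
    final String name;
    final List<Node> inputs;

    Node(String name, List<Node> inputs) {
        this.name = name;
        this.inputs = inputs;
    }

    // Mirrors the accept(...) example in the Javadoc: pre-visit,
    // descend into the predecessors, then post-visit.
    void accept(NodeVisitor visitor) {
        boolean descend = visitor.preVisit(this);
        if (descend) {
            for (Node input : inputs) {
                input.accept(visitor);
            }
            visitor.postVisit(this);
        }
    }

    public static void main(String[] args) {
        Node leaf = new Node("source", List.of());
        Node root = new Node("sink", List.of(leaf));
        root.accept(new NodeVisitor() {
            public boolean preVisit(Node n) { System.out.println("pre  " + n.name); return true; }
            public void postVisit(Node n)  { System.out.println("post " + n.name); }
        });
        // Prints: pre sink, pre source, post source, post sink.
    }
}
```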
java
|
apache__camel
|
components/camel-consul/src/test/java/org/apache/camel/component/consul/cluster/ConsulClusteredRoutePolicyIT.java
|
{
"start": 1691,
"end": 4634
}
|
class ____ {
@RegisterExtension
public static ConsulService service = ConsulServiceFactory.createService();
private static final Logger LOGGER = LoggerFactory.getLogger(ConsulClusteredRoutePolicyIT.class);
private static final List<String> CLIENTS = IntStream.range(0, 3).mapToObj(Integer::toString).toList();
private static final List<String> RESULTS = new ArrayList<>();
private static final ScheduledExecutorService SCHEDULER = Executors.newScheduledThreadPool(CLIENTS.size() * 2);
private static final CountDownLatch LATCH = new CountDownLatch(CLIENTS.size());
// ************************************
// Test
// ************************************
@Test
public void test() throws Exception {
for (String id : CLIENTS) {
SCHEDULER.submit(() -> run(id));
}
LATCH.await(1, TimeUnit.MINUTES);
SCHEDULER.shutdownNow();
Assertions.assertEquals(CLIENTS.size(), RESULTS.size());
Assertions.assertTrue(RESULTS.containsAll(CLIENTS));
}
// ************************************
// Run a Camel node
// ************************************
private static void run(String id) {
try {
int events = ThreadLocalRandom.current().nextInt(2, 6);
CountDownLatch contextLatch = new CountDownLatch(events);
ConsulClusterService consulClusterService = new ConsulClusterService();
consulClusterService.setId("node-" + id);
consulClusterService.setUrl(service.getConsulUrl());
LOGGER.info("Consul URL {}", consulClusterService.getUrl());
DefaultCamelContext context = new DefaultCamelContext();
context.disableJMX();
context.getCamelContextExtension().setName("context-" + id);
context.addService(consulClusterService);
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("timer:consul?delay=1000&period=1000").routeId("route-" + id)
.routePolicy(ClusteredRoutePolicy.forNamespace("my-ns")).log("From ${routeId}")
.process(e -> contextLatch.countDown());
}
});
// Start the context after some random time so the startup order
// changes for each test.
Awaitility.await().pollDelay(ThreadLocalRandom.current().nextInt(500), TimeUnit.MILLISECONDS)
.untilAsserted(() -> Assertions.assertDoesNotThrow(context::start));
context.start();
contextLatch.await();
LOGGER.debug("Shutting down node {}", id);
RESULTS.add(id);
context.stop();
LATCH.countDown();
} catch (Exception e) {
LOGGER.warn("{}", e.getMessage(), e);
}
}
}
|
ConsulClusteredRoutePolicyIT
|
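Condensed from the test above: the minimal wiring needed for a route to participate in Consul-based leader election. Only APIs visible in the record are used; the import paths and the Consul URL are assumptions.

```java
// Sketch under assumed imports; the URL is hypothetical.
import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.consul.cluster.ConsulClusterService;
import org.apache.camel.impl.DefaultCamelContext;
import org.apache.camel.support.cluster.ClusteredRoutePolicy;

public class ClusteredRouteSketch {
    public static void main(String[] args) throws Exception {
        ConsulClusterService cluster = new ConsulClusterService();
        cluster.setId("node-1");
        cluster.setUrl("http://localhost:8500");

        DefaultCamelContext context = new DefaultCamelContext();
        context.addService(cluster);
        context.addRoutes(new RouteBuilder() {
            @Override
            public void configure() {
                // Only the elected leader for namespace "my-ns" runs this route.
                from("timer:consul?period=1000")
                        .routePolicy(ClusteredRoutePolicy.forNamespace("my-ns"))
                        .log("From ${routeId}");
            }
        });
        context.start();
    }
}
```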
java
|
elastic__elasticsearch
|
server/src/test/java/org/elasticsearch/script/ScriptContextTests.java
|
{
"start": 955,
"end": 1051
}
|
interface ____ {
String typoNewInstanceMethod(int foo);
}
public
|
MissingNewInstance
|
java
|
elastic__elasticsearch
|
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/CreateTrainedModelAssignmentActionRequestTests.java
|
{
"start": 516,
"end": 1061
}
|
class ____ extends AbstractWireSerializingTestCase<Request> {
@Override
protected Request createTestInstance() {
return new Request(StartTrainedModelDeploymentTaskParamsTests.createRandom(), null);
}
@Override
protected Request mutateInstance(Request instance) {
return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929
}
@Override
protected Writeable.Reader<Request> instanceReader() {
return Request::new;
}
}
|
CreateTrainedModelAssignmentActionRequestTests
|
java
|
alibaba__druid
|
core/src/test/java/com/alibaba/druid/bvt/sql/mysql/param/MySqlParameterizedOutputVisitorTest_58.java
|
{
"start": 331,
"end": 1419
}
|
class ____ extends TestCase {
final DbType dbType = JdbcConstants.MYSQL;
public void test_for_parameterize() throws Exception {
String sql = "select * from t where id = 101";
List<Object> params = new ArrayList<Object>();
String psql = ParameterizedOutputVisitorUtils.parameterize(sql, dbType, params);
assertEquals("SELECT *\n" +
"FROM t\n" +
"WHERE id = ?", psql);
assertEquals(1, params.size());
assertEquals(101, params.get(0));
}
public void test_for_parameterize_insert() throws Exception {
String sql = "insert into mytab(fid,fname)values(1001,'wenshao');";
List<Object> params = new ArrayList<Object>();
String psql = ParameterizedOutputVisitorUtils.parameterize(sql, dbType, params);
assertEquals("INSERT INTO mytab(fid, fname)\n" +
"VALUES (?, ?);", psql);
assertEquals(2, params.size());
assertEquals(1001, params.get(0));
assertEquals("wenshao", params.get(1));
}
}
|
MySqlParameterizedOutputVisitorTest_58
|
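The test pins down the observable contract of `ParameterizedOutputVisitorUtils.parameterize`: literals are lifted out of the SQL and collected as parameters. A toy regex-based version conveys the idea; Druid's real implementation walks a parsed AST with an output visitor, so this is only an illustration of the contract, not the technique.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ToyParameterizer {
    // Matches single-quoted strings or bare integer literals.
    private static final Pattern LITERAL = Pattern.compile("'([^']*)'|\\b(\\d+)\\b");

    public static String parameterize(String sql, List<Object> params) {
        Matcher m = LITERAL.matcher(sql);
        StringBuilder out = new StringBuilder();
        while (m.find()) {
            params.add(m.group(1) != null ? m.group(1) : Integer.valueOf(m.group(2)));
            m.appendReplacement(out, "?");
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        List<Object> params = new ArrayList<>();
        // Prints: select * from t where id = ? [101]
        System.out.println(parameterize("select * from t where id = 101", params) + " " + params);
    }
}
```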
java
|
apache__flink
|
flink-end-to-end-tests/flink-tpch-test/src/main/java/org/apache/flink/table/tpch/TpchDataGenerator.java
|
{
"start": 1227,
"end": 5051
}
|
class ____ {
public static final int QUERY_NUM = 22;
public static void main(String[] args) throws IOException {
if (args.length != 2) {
System.out.println("Exactly 1 double value and 1 path should be provided as argument");
return;
}
double scale = Double.valueOf(args[0]);
String path = args[1];
generateTable(scale, path);
generateQuery(path);
generateExpected(path);
}
private static void generateTable(double scale, String path) throws IOException {
File dir = new File(path + "/table");
dir.mkdir();
for (TpchTable table : TpchTable.getTables()) {
Iterable generator = table.createGenerator(scale, 1, 1);
StringBuilder builder = new StringBuilder();
generator.forEach(
s -> {
String line = ((TpchEntity) s).toLine().trim();
if (line.endsWith("|")) {
line = line.substring(0, line.length() - 1);
}
builder.append(line).append('\n');
});
try (BufferedWriter writer =
new BufferedWriter(
new OutputStreamWriter(
new FileOutputStream(
path + "/table/" + table.getTableName() + ".csv")))) {
writer.write(builder.toString());
}
}
}
private static void generateQuery(String path) throws IOException {
File dir = new File(path + "/query");
dir.mkdir();
for (int i = 0; i < QUERY_NUM; i++) {
try (InputStream in =
TpchDataGenerator.class.getResourceAsStream(
"/io/airlift/tpch/queries/q" + (i + 1) + ".sql");
OutputStream out = new FileOutputStream(path + "/query/q" + (i + 1) + ".sql")) {
byte[] buffer = new byte[4096];
int bytesRead = 0;
while ((bytesRead = in.read(buffer)) > 0) {
out.write(buffer, 0, bytesRead);
}
}
}
}
private static void generateExpected(String path) throws IOException {
File dir = new File(path + "/expected");
dir.mkdir();
for (int i = 0; i < QUERY_NUM; i++) {
try (BufferedReader reader =
new BufferedReader(
new InputStreamReader(
TpchDataGenerator.class.getResourceAsStream(
"/io/airlift/tpch/queries/q"
+ (i + 1)
+ ".result")));
BufferedWriter writer =
new BufferedWriter(
new OutputStreamWriter(
new FileOutputStream(
path + "/expected/q" + (i + 1) + ".csv")))) {
int lineNumber = 0;
String line;
while ((line = reader.readLine()) != null) {
line = line.trim().replace("null", "");
lineNumber++;
if (lineNumber == 1) {
continue;
}
if (line.length() > 0 && line.endsWith("|")) {
line = line.substring(0, line.length() - 1);
}
writer.write(line + "\n");
}
}
}
}
}
|
TpchDataGenerator
|
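The manual 4 KB buffer loop in `generateQuery` predates `InputStream.transferTo` (Java 9). A sketch of the same resource-to-file copy step using it, keeping the record's resource path pattern; the method and output path are otherwise hypothetical.

```java
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class ResourceCopy {
    // Copies a bundled query file to disk; transferTo replaces the
    // explicit read/write loop used in generateQuery above.
    static void copyQuery(int i, String path) throws IOException {
        try (InputStream in = ResourceCopy.class.getResourceAsStream(
                        "/io/airlift/tpch/queries/q" + i + ".sql");
             OutputStream out = new FileOutputStream(path + "/query/q" + i + ".sql")) {
            // getResourceAsStream returns null if the resource is missing;
            // a production version would check before copying.
            in.transferTo(out);
        }
    }
}
```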
java
|
apache__hadoop
|
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Merger.java
|
{
"start": 21197,
"end": 33738
}
|
class ____ the inmem segment's byte[].
//When we load the value bytes from disk, we shouldn't use
//the same byte[] since it would corrupt the data in the inmem
//segment. So we maintain an explicit DIB for value bytes
//obtained from disk, and if the current segment is a disk
//segment, we reset the "value" DIB to the byte[] in that (so
//we reuse the disk segment DIB whenever we consider
//a disk segment).
minSegment.getValue(diskIFileValue);
value.reset(diskIFileValue.getData(), diskIFileValue.getLength());
} else {
minSegment.getValue(value);
}
long endPos = minSegment.getReader().bytesRead;
totalBytesProcessed += endPos - startPos;
mergeProgress.set(Math.min(1.0f, totalBytesProcessed * progPerByte));
return true;
}
@SuppressWarnings("unchecked")
protected boolean lessThan(Object a, Object b) {
DataInputBuffer key1 = ((Segment<K, V>)a).getKey();
DataInputBuffer key2 = ((Segment<K, V>)b).getKey();
int s1 = key1.getPosition();
int l1 = key1.getLength() - s1;
int s2 = key2.getPosition();
int l2 = key2.getLength() - s2;
return comparator.compare(key1.getData(), s1, l1, key2.getData(), s2, l2) < 0;
}
public RawKeyValueIterator merge(Class<K> keyClass, Class<V> valueClass,
int factor, Path tmpDir,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
return merge(keyClass, valueClass, factor, 0, tmpDir,
readsCounter, writesCounter, mergePhase);
}
RawKeyValueIterator merge(Class<K> keyClass, Class<V> valueClass,
int factor, int inMem, Path tmpDir,
Counters.Counter readsCounter,
Counters.Counter writesCounter,
Progress mergePhase)
throws IOException {
LOG.info("Merging " + segments.size() + " sorted segments");
/*
* If there are inMemory segments, then they come first in the segments
* list and then the sorted disk segments. Otherwise(if there are only
* disk segments), then they are sorted segments if there are more than
* factor segments in the segments list.
*/
int numSegments = segments.size();
int origFactor = factor;
int passNo = 1;
if (mergePhase != null) {
mergeProgress = mergePhase;
}
long totalBytes = computeBytesInMerges(factor, inMem);
if (totalBytes != 0) {
progPerByte = 1.0f / (float)totalBytes;
}
//create the MergeStreams from the sorted map created in the constructor
//and dump the final output to a file
do {
//get the factor for this pass of merge. We assume in-memory segments
//are the first entries in the segment list and that the pass factor
//doesn't apply to them
factor = getPassFactor(factor, passNo, numSegments - inMem);
if (1 == passNo) {
factor += inMem;
}
List<Segment<K, V>> segmentsToMerge =
new ArrayList<Segment<K, V>>();
int segmentsConsidered = 0;
int numSegmentsToConsider = factor;
long startBytes = 0; // starting bytes of segments of this merge
while (true) {
//extract the smallest 'factor' number of segments
//Call cleanup on the empty segments (no key/value data)
List<Segment<K, V>> mStream =
getSegmentDescriptors(numSegmentsToConsider);
for (Segment<K, V> segment : mStream) {
// Initialize the segment at the last possible moment;
// this helps in ensuring we don't use buffers until we need them
segment.init(readsCounter);
long startPos = segment.getReader().bytesRead;
boolean hasNext = segment.nextRawKey();
long endPos = segment.getReader().bytesRead;
if (hasNext) {
startBytes += endPos - startPos;
segmentsToMerge.add(segment);
segmentsConsidered++;
}
else {
segment.close();
numSegments--; //we ignore this segment for the merge
}
}
//if we have the desired number of segments
//or looked at all available segments, we break
if (segmentsConsidered == factor ||
segments.size() == 0) {
break;
}
numSegmentsToConsider = factor - segmentsConsidered;
}
//feed the streams to the priority queue
initialize(segmentsToMerge.size());
clear();
for (Segment<K, V> segment : segmentsToMerge) {
put(segment);
}
//if we have lesser number of segments remaining, then just return the
//iterator, else do another single level merge
if (numSegments <= factor) {
if (!includeFinalMerge) { // for reduce task
// Reset totalBytesProcessed and recalculate totalBytes from the
// remaining segments to track the progress of the final merge.
// Final merge is considered as the progress of the reducePhase,
// the 3rd phase of reduce task.
totalBytesProcessed = 0;
totalBytes = 0;
for (int i = 0; i < segmentsToMerge.size(); i++) {
totalBytes += segmentsToMerge.get(i).getRawDataLength();
}
}
if (totalBytes != 0) //being paranoid
progPerByte = 1.0f / (float)totalBytes;
totalBytesProcessed += startBytes;
if (totalBytes != 0)
mergeProgress.set(Math.min(1.0f, totalBytesProcessed * progPerByte));
else
mergeProgress.set(1.0f); // Last pass and no segments left - we're done
LOG.info("Down to the last merge-pass, with " + numSegments +
" segments left of total size: " +
(totalBytes - totalBytesProcessed) + " bytes");
return this;
} else {
LOG.info("Merging " + segmentsToMerge.size() +
" intermediate segments out of a total of " +
(segments.size()+segmentsToMerge.size()));
long bytesProcessedInPrevMerges = totalBytesProcessed;
totalBytesProcessed += startBytes;
//we want to spread the creation of temp files on multiple disks if
//available under the space constraints
long approxOutputSize = 0;
for (Segment<K, V> s : segmentsToMerge) {
approxOutputSize += s.getLength() +
ChecksumFileSystem.getApproxChkSumLength(
s.getLength());
}
Path tmpFilename =
new Path(tmpDir, "intermediate").suffix("." + passNo);
Path outputFile = lDirAlloc.getLocalPathForWrite(
tmpFilename.toString(),
approxOutputSize, conf);
FSDataOutputStream out = fs.create(outputFile);
out = IntermediateEncryptedStream.wrapIfNecessary(conf, out,
outputFile);
Writer<K, V> writer = new Writer<K, V>(conf, out, keyClass, valueClass,
codec, writesCounter, true);
writeFile(this, writer, reporter, conf);
writer.close();
//we finished one single level merge; now clean up the priority
//queue
this.close();
// Add the newly create segment to the list of segments to be merged
Segment<K, V> tempSegment =
new Segment<K, V>(conf, fs, outputFile, codec, false);
// Insert new merged segment into the sorted list
int pos = Collections.binarySearch(segments, tempSegment,
segmentComparator);
if (pos < 0) {
// binary search failed. So position to be inserted at is -pos-1
pos = -pos-1;
}
segments.add(pos, tempSegment);
numSegments = segments.size();
// Subtract the difference between expected size of new segment and
// actual size of new segment(Expected size of new segment is
// inputBytesOfThisMerge) from totalBytes. Expected size and actual
// size will match(almost) if combiner is not called in merge.
long inputBytesOfThisMerge = totalBytesProcessed -
bytesProcessedInPrevMerges;
totalBytes -= inputBytesOfThisMerge - tempSegment.getRawDataLength();
if (totalBytes != 0) {
progPerByte = 1.0f / (float)totalBytes;
}
passNo++;
}
//we are worried about only the first pass merge factor. So reset the
//factor to what it originally was
factor = origFactor;
} while(true);
}
/**
* Determine the number of segments to merge in a given pass. Assuming more
* than factor segments, the first pass should attempt to bring the total
* number of segments - 1 to be divisible by the factor - 1 (each pass
* takes X segments and produces 1) to minimize the number of merges.
*/
private int getPassFactor(int factor, int passNo, int numSegments) {
if (passNo > 1 || numSegments <= factor || factor == 1)
return factor;
int mod = (numSegments - 1) % (factor - 1);
if (mod == 0)
return factor;
return mod + 1;
}
/** Return (& remove) the requested number of segment descriptors from the
* sorted map.
*/
private List<Segment<K, V>> getSegmentDescriptors(int numDescriptors) {
if (numDescriptors > segments.size()) {
List<Segment<K, V>> subList = new ArrayList<Segment<K,V>>(segments);
segments.clear();
return subList;
}
List<Segment<K, V>> subList =
new ArrayList<Segment<K,V>>(segments.subList(0, numDescriptors));
for (int i=0; i < numDescriptors; ++i) {
segments.remove(0);
}
return subList;
}
/**
* Compute expected size of input bytes to merges, will be used in
* calculating mergeProgress. This simulates the above merge() method and
* tries to obtain the number of bytes that are going to be merged in all
* merges(assuming that there is no combiner called while merging).
* @param factor mapreduce.task.io.sort.factor
* @param inMem number of segments in memory to be merged
*/
long computeBytesInMerges(int factor, int inMem) {
int numSegments = segments.size();
List<Long> segmentSizes = new ArrayList<Long>(numSegments);
long totalBytes = 0;
int n = numSegments - inMem;
// factor for 1st pass
int f = getPassFactor(factor, 1, n) + inMem;
n = numSegments;
for (int i = 0; i < numSegments; i++) {
// Not handling empty segments here assuming that it would not affect
// much in calculation of mergeProgress.
segmentSizes.add(segments.get(i).getRawDataLength());
}
// If includeFinalMerge is true, allow the following while loop iterate
// for 1 more iteration. This is to include final merge as part of the
// computation of expected input bytes of merges
boolean considerFinalMerge = includeFinalMerge;
while (n > f || considerFinalMerge) {
if (n <=f ) {
considerFinalMerge = false;
}
long mergedSize = 0;
f = Math.min(f, segmentSizes.size());
for (int j = 0; j < f; j++) {
mergedSize += segmentSizes.remove(0);
}
totalBytes += mergedSize;
// insert new size into the sorted list
int pos = Collections.binarySearch(segmentSizes, mergedSize);
if (pos < 0) {
pos = -pos-1;
}
segmentSizes.add(pos, mergedSize);
n -= (f-1);
f = factor;
}
return totalBytes;
}
public Progress getProgress() {
return mergeProgress;
}
}
}
|
to
|
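The Javadoc on `getPassFactor` is easiest to see with concrete numbers: for 13 on-disk segments and a merge factor of 10, the first pass merges only `(13 - 1) % (10 - 1) + 1 = 4` segments, leaving exactly 10 for one final full-width pass. A standalone check of that arithmetic, copying the method body from the record:

```java
public class PassFactorDemo {
    // Same arithmetic as Merger.getPassFactor in the record above.
    static int getPassFactor(int factor, int passNo, int numSegments) {
        if (passNo > 1 || numSegments <= factor || factor == 1) {
            return factor;
        }
        int mod = (numSegments - 1) % (factor - 1);
        return mod == 0 ? factor : mod + 1;
    }

    public static void main(String[] args) {
        // First pass over 13 segments with factor 10 merges 4 of them:
        // 13 - 4 + 1 = 10 segments remain, so one final pass suffices.
        System.out.println(getPassFactor(10, 1, 13)); // 4
        System.out.println(getPassFactor(10, 2, 10)); // 10 (later passes use the full factor)
    }
}
```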
java
|
spring-projects__spring-boot
|
loader/spring-boot-loader-tools/src/main/java/org/springframework/boot/loader/tools/ReachabilityMetadataProperties.java
|
{
"start": 893,
"end": 2578
}
|
class ____ {
/**
* Location of the properties file. Must be formatted using
* {@link String#format(String, Object...)} with the group id, artifact id and version
* of the dependency.
*/
public static final String REACHABILITY_METADATA_PROPERTIES_LOCATION_TEMPLATE = "META-INF/native-image/%s/%s/%s/reachability-metadata.properties";
private final Properties properties;
private ReachabilityMetadataProperties(Properties properties) {
this.properties = properties;
}
/**
* Returns if the dependency has been overridden.
* @return true if the dependency has been overridden
*/
public boolean isOverridden() {
return Boolean.parseBoolean(this.properties.getProperty("override"));
}
/**
* Constructs a new instance from the given {@code InputStream}.
* @param inputStream {@code InputStream} to load the properties from
* @return loaded properties
* @throws IOException if loading from the {@code InputStream} went wrong
*/
public static ReachabilityMetadataProperties fromInputStream(InputStream inputStream) throws IOException {
Properties properties = new Properties();
properties.load(inputStream);
return new ReachabilityMetadataProperties(properties);
}
/**
* Returns the location of the properties for the given coordinates.
* @param coordinates library coordinates for which the property file location should
* be returned
* @return location of the properties
*/
public static String getLocation(LibraryCoordinates coordinates) {
return REACHABILITY_METADATA_PROPERTIES_LOCATION_TEMPLATE.formatted(coordinates.getGroupId(),
coordinates.getArtifactId(), coordinates.getVersion());
}
}
|
ReachabilityMetadataProperties
|
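Putting the record's two static helpers together: format the location from coordinates, then parse the properties. `LibraryCoordinates` is Spring Boot's own type, so the sketch below inlines plain example strings to stay self-contained; `override` is the key the record reads.

```java
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;

public class ReachabilityLookupSketch {
    public static void main(String[] args) throws IOException {
        // Same template as REACHABILITY_METADATA_PROPERTIES_LOCATION_TEMPLATE,
        // filled with hypothetical coordinates (group id, artifact id, version).
        String location = String.format(
                "META-INF/native-image/%s/%s/%s/reachability-metadata.properties",
                "com.example", "example-lib", "1.0.0");
        System.out.println(location);

        // fromInputStream is just Properties.load; "override" toggles
        // whether the dependency's metadata has been overridden.
        Properties properties = new Properties();
        properties.load(new StringReader("override=true"));
        System.out.println(Boolean.parseBoolean(properties.getProperty("override")));
    }
}
```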
java
|
apache__flink
|
flink-python/src/main/java/org/apache/flink/table/runtime/operators/python/table/EmbeddedPythonTableFunctionOperator.java
|
{
"start": 2062,
"end": 7228
}
|
class ____ extends AbstractEmbeddedStatelessFunctionOperator {
private static final long serialVersionUID = 1L;
/** The Python {@link TableFunction} to be executed. */
private final PythonFunctionInfo tableFunction;
/** The correlate join type. */
private final FlinkJoinType joinType;
/** The GenericRowData reused holding the null execution result of python udf. */
private GenericRowData reuseNullResultRowData;
/** The JoinedRowData reused holding the execution result. */
private transient JoinedRowData reuseJoinedRow;
public EmbeddedPythonTableFunctionOperator(
Configuration config,
PythonFunctionInfo tableFunction,
RowType inputType,
RowType udfInputType,
RowType udfOutputType,
FlinkJoinType joinType,
int[] udfInputOffsets) {
super(config, inputType, udfInputType, udfOutputType, udfInputOffsets);
this.tableFunction = Preconditions.checkNotNull(tableFunction);
Preconditions.checkArgument(
joinType == FlinkJoinType.INNER || joinType == FlinkJoinType.LEFT,
"The join type should be inner join or left join");
this.joinType = joinType;
}
@Override
public void open() throws Exception {
super.open();
reuseJoinedRow = new JoinedRowData();
reuseNullResultRowData = new GenericRowData(udfOutputType.getFieldCount());
for (int i = 0; i < udfOutputType.getFieldCount(); i++) {
reuseNullResultRowData.setField(i, null);
}
}
@Override
public void openPythonInterpreter() {
// from pyflink.fn_execution.embedded.operation_utils import
// create_table_operation_from_proto
//
// proto = xxx
// table_operation = create_table_operation_from_proto(
// proto, input_coder_proto, output_coder_proto)
// table_operation.open()
interpreter.exec(
"from pyflink.fn_execution.embedded.operation_utils import create_table_operation_from_proto");
interpreter.set(
"input_coder_proto",
createFlattenRowTypeCoderInfoDescriptorProto(
udfInputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false)
.toByteArray());
interpreter.set(
"output_coder_proto",
createFlattenRowTypeCoderInfoDescriptorProto(
udfOutputType, FlinkFnApi.CoderInfoDescriptor.Mode.MULTIPLE, false)
.toByteArray());
interpreter.set(
"proto",
ProtoUtils.createUserDefinedFunctionsProto(
getRuntimeContext(),
new PythonFunctionInfo[] {tableFunction},
config.get(PYTHON_METRIC_ENABLED),
config.get(PYTHON_PROFILE_ENABLED))
.toByteArray());
interpreter.exec(
"table_operation = create_table_operation_from_proto("
+ "proto,"
+ "input_coder_proto,"
+ "output_coder_proto)");
// invoke the open method of table_operation which calls
// the open method of the user-defined table function.
interpreter.invokeMethod("table_operation", "open");
}
@Override
public void endInput() {
if (interpreter != null) {
// invoke the open method of table_operation which calls
// the close method of the user-defined table function.
interpreter.invokeMethod("table_operation", "close");
}
}
@SuppressWarnings("unchecked")
@Override
public void processElement(StreamRecord<RowData> element) throws Exception {
RowData value = element.getValue();
for (int i = 0; i < userDefinedFunctionInputArgs.length; i++) {
userDefinedFunctionInputArgs[i] =
userDefinedFunctionInputConverters[i].toExternal(value, udfInputOffsets[i]);
}
PyIterator udtfResults =
(PyIterator)
interpreter.invokeMethod(
"table_operation",
"process_element",
(Object) (userDefinedFunctionInputArgs));
if (udtfResults.hasNext()) {
do {
Object[] udtfResult = (Object[]) udtfResults.next();
for (int i = 0; i < udtfResult.length; i++) {
reuseResultRowData.setField(
i, userDefinedFunctionOutputConverters[i].toInternal(udtfResult[i]));
}
rowDataWrapper.collect(reuseJoinedRow.replace(value, reuseResultRowData));
} while (udtfResults.hasNext());
} else if (joinType == FlinkJoinType.LEFT) {
rowDataWrapper.collect(reuseJoinedRow.replace(value, reuseNullResultRowData));
}
udtfResults.close();
}
}
|
EmbeddedPythonTableFunctionOperator
|
java
|
apache__kafka
|
streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
|
{
"start": 8385,
"end": 25238
}
|
class ____ extends AbstractConfig {
private static final Logger log = LoggerFactory.getLogger(StreamsConfig.class);
private static final ConfigDef CONFIG;
private final boolean eosEnabled;
private static final long DEFAULT_COMMIT_INTERVAL_MS = 30000L;
private static final long EOS_DEFAULT_COMMIT_INTERVAL_MS = 100L;
private static final int DEFAULT_TRANSACTION_TIMEOUT = 10000;
@Deprecated
@SuppressWarnings("unused")
public static final int DUMMY_THREAD_INDEX = 1;
public static final long MAX_TASK_IDLE_MS_DISABLED = -1;
// We impose these limitations because client tags are encoded into the subscription info,
// which is part of the group metadata message that is persisted into the internal topic.
public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_LIST_SIZE = 5;
public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_KEY_LENGTH = 20;
public static final int MAX_RACK_AWARE_ASSIGNMENT_TAG_VALUE_LENGTH = 30;
/**
* Prefix used to provide default topic configs to be applied when creating internal topics.
* These should be valid properties from {@link org.apache.kafka.common.config.TopicConfig TopicConfig}.
* It is recommended to use {@link #topicPrefix(String)}.
*/
// TODO: currently we cannot get the full topic configurations and hence cannot allow topic configs without the prefix,
// this can be lifted once kafka.log.LogConfig is completely deprecated by org.apache.kafka.common.config.TopicConfig
@SuppressWarnings("WeakerAccess")
public static final String TOPIC_PREFIX = "topic.";
/**
* Prefix used to isolate {@link KafkaConsumer consumer} configs from other client configs.
* It is recommended to use {@link #consumerPrefix(String)} to add this prefix to {@link ConsumerConfig consumer
* properties}.
*/
@SuppressWarnings("WeakerAccess")
public static final String CONSUMER_PREFIX = "consumer.";
/**
* Prefix used to override {@link KafkaConsumer consumer} configs for the main consumer client from
* the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
* 1. main.consumer.[config-name]
* 2. consumer.[config-name]
* 3. [config-name]
*/
@SuppressWarnings("WeakerAccess")
public static final String MAIN_CONSUMER_PREFIX = "main.consumer.";
/**
* Prefix used to override {@link KafkaConsumer consumer} configs for the restore consumer client from
* the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
* 1. restore.consumer.[config-name]
* 2. consumer.[config-name]
* 3. [config-name]
*/
@SuppressWarnings("WeakerAccess")
public static final String RESTORE_CONSUMER_PREFIX = "restore.consumer.";
/**
* Prefix used to override {@link KafkaConsumer consumer} configs for the global consumer client from
* the general consumer client configs. The override precedence is the following (from highest to lowest precedence):
* 1. global.consumer.[config-name]
* 2. consumer.[config-name]
* 3. [config-name]
*/
@SuppressWarnings("WeakerAccess")
public static final String GLOBAL_CONSUMER_PREFIX = "global.consumer.";
/**
* Prefix used to isolate {@link KafkaProducer producer} configs from other client configs.
* It is recommended to use {@link #producerPrefix(String)} to add this prefix to {@link ProducerConfig producer
* properties}.
*/
@SuppressWarnings("WeakerAccess")
public static final String PRODUCER_PREFIX = "producer.";
/**
* Prefix used to isolate {@link Admin admin} configs from other client configs.
* It is recommended to use {@link #adminClientPrefix(String)} to add this prefix to {@link AdminClientConfig admin
* client properties}.
*/
@SuppressWarnings("WeakerAccess")
public static final String ADMIN_CLIENT_PREFIX = "admin.";
/**
* Prefix used to add arbitrary tags to a Kafka Stream's instance as key-value pairs.
* Example:
* client.tag.zone=zone1
* client.tag.cluster=cluster1
*/
@SuppressWarnings("WeakerAccess")
public static final String CLIENT_TAG_PREFIX = "client.tag.";
/**
* Config value for parameter {@link #TOPOLOGY_OPTIMIZATION_CONFIG "topology.optimization"} for disabling topology optimization
*/
public static final String NO_OPTIMIZATION = "none";
/**
* Config value for parameter {@link #TOPOLOGY_OPTIMIZATION_CONFIG "topology.optimization"} for enabling topology optimization
*/
public static final String OPTIMIZE = "all";
/**
* Config value for parameter {@link #TOPOLOGY_OPTIMIZATION_CONFIG "topology.optimization"}
* for enabling the specific optimization that reuses source topic as changelog topic
* for KTables.
*/
public static final String REUSE_KTABLE_SOURCE_TOPICS = "reuse.ktable.source.topics";
/**
* Config value for parameter {@link #TOPOLOGY_OPTIMIZATION_CONFIG "topology.optimization"}
* for enabling the specific optimization that merges duplicated repartition topics.
*/
public static final String MERGE_REPARTITION_TOPICS = "merge.repartition.topics";
/**
* Config value for parameter {@link #TOPOLOGY_OPTIMIZATION_CONFIG "topology.optimization"}
* for enabling the optimization that optimizes inner stream-stream joins into self-joins when
* both arguments are the same stream.
*/
public static final String SINGLE_STORE_SELF_JOIN = "single.store.self.join";
private static final List<String> TOPOLOGY_OPTIMIZATION_CONFIGS = Arrays.asList(
OPTIMIZE, NO_OPTIMIZATION, REUSE_KTABLE_SOURCE_TOPICS, MERGE_REPARTITION_TOPICS,
SINGLE_STORE_SELF_JOIN);
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.4.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_24 = UpgradeFromValues.UPGRADE_FROM_24.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.5.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_25 = UpgradeFromValues.UPGRADE_FROM_25.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.6.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_26 = UpgradeFromValues.UPGRADE_FROM_26.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.7.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_27 = UpgradeFromValues.UPGRADE_FROM_27.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 2.8.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_28 = UpgradeFromValues.UPGRADE_FROM_28.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.0.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_30 = UpgradeFromValues.UPGRADE_FROM_30.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.1.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_31 = UpgradeFromValues.UPGRADE_FROM_31.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.2.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_32 = UpgradeFromValues.UPGRADE_FROM_32.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.3.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_33 = UpgradeFromValues.UPGRADE_FROM_33.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.4.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_34 = UpgradeFromValues.UPGRADE_FROM_34.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.5.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_35 = UpgradeFromValues.UPGRADE_FROM_35.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.6.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_36 = UpgradeFromValues.UPGRADE_FROM_36.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.7.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_37 = UpgradeFromValues.UPGRADE_FROM_37.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.8.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_38 = UpgradeFromValues.UPGRADE_FROM_38.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 3.9.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_39 = UpgradeFromValues.UPGRADE_FROM_39.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 4.0.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_40 = UpgradeFromValues.UPGRADE_FROM_40.toString();
/**
* Config value for parameter {@link #UPGRADE_FROM_CONFIG "upgrade.from"} for upgrading an application from version {@code 4.1.x}.
*/
@SuppressWarnings("WeakerAccess")
public static final String UPGRADE_FROM_41 = UpgradeFromValues.UPGRADE_FROM_41.toString();
/**
* Config value for parameter {@link #PROCESSING_GUARANTEE_CONFIG "processing.guarantee"} for at-least-once processing guarantees.
*/
@SuppressWarnings("WeakerAccess")
public static final String AT_LEAST_ONCE = "at_least_once";
/**
* Config value for parameter {@link #PROCESSING_GUARANTEE_CONFIG "processing.guarantee"} for exactly-once processing guarantees.
*
* <p> Enabling exactly-once-v2 requires broker version 2.5 or higher.
*/
@SuppressWarnings("WeakerAccess")
public static final String EXACTLY_ONCE_V2 = "exactly_once_v2";
public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_NONE = "none";
public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_MIN_TRAFFIC = "min_traffic";
public static final String RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY = "balance_subtopology";
/**
* Config value for parameter {@link #BUILT_IN_METRICS_VERSION_CONFIG "built.in.metrics.version"} for the latest built-in metrics version.
*/
public static final String METRICS_LATEST = "latest";
/** {@code acceptable.recovery.lag} */
public static final String ACCEPTABLE_RECOVERY_LAG_CONFIG = "acceptable.recovery.lag";
private static final String ACCEPTABLE_RECOVERY_LAG_DOC = "The maximum acceptable lag (number of offsets to catch up) for a client to be considered caught-up enough" +
" to receive an active task assignment. Upon assignment, it will still restore the rest of the changelog" +
" before processing. To avoid a pause in processing during rebalances, this config" +
" should correspond to a recovery time of well under a minute for a given workload. Must be at least 0.";
/** {@code allow.os.group.write.access} */
@SuppressWarnings("WeakerAccess")
public static final String ALLOW_OS_GROUP_WRITE_ACCESS_CONFIG = "allow.os.group.write.access";
private static final String ALLOW_OS_GROUP_WRITE_ACCESS_DOC = "Allows state store directories created by Kafka Streams to have write access for the OS group. Default is false";
/** {@code application.id} */
@SuppressWarnings("WeakerAccess")
public static final String APPLICATION_ID_CONFIG = "application.id";
private static final String APPLICATION_ID_DOC = "An identifier for the stream processing application. Must be unique within the Kafka cluster. It is used as 1) the default client-id prefix, 2) the group-id for membership management, 3) the changelog topic prefix.";
/**{@code application.server} */
@SuppressWarnings("WeakerAccess")
public static final String APPLICATION_SERVER_CONFIG = "application.server";
private static final String APPLICATION_SERVER_DOC = "A host:port pair pointing to a user-defined endpoint that can be used for state store discovery and interactive queries on this KafkaStreams instance.";
/** {@code bootstrap.servers} */
@SuppressWarnings("WeakerAccess")
public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG;
/** {@code buffered.records.per.partition} */
@SuppressWarnings("WeakerAccess")
public static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG = "buffered.records.per.partition";
@Deprecated
public static final String BUFFERED_RECORDS_PER_PARTITION_DOC = "Maximum number of records to buffer per partition.";
/** {@code built.in.metrics.version} */
public static final String BUILT_IN_METRICS_VERSION_CONFIG = "built.in.metrics.version";
private static final String BUILT_IN_METRICS_VERSION_DOC = "Version of the built-in metrics to use.";
/** {@code cache.max.bytes.buffering}
* @deprecated Since 3.4. Use {@link #STATESTORE_CACHE_MAX_BYTES_CONFIG "statestore.cache.max.bytes"} instead. */
@SuppressWarnings("WeakerAccess")
@Deprecated
public static final String CACHE_MAX_BYTES_BUFFERING_CONFIG = "cache.max.bytes.buffering";
@Deprecated
public static final String CACHE_MAX_BYTES_BUFFERING_DOC = "Maximum number of memory bytes to be used for buffering across all threads";
/** {@code client.id} */
@SuppressWarnings("WeakerAccess")
public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG;
private static final String CLIENT_ID_DOC = "An ID prefix string used for the client IDs of internal (main, restore, and global) consumers , producers, and admin clients" +
" with pattern <code><client.id>-[Global]StreamThread[-<threadSequenceNumber>]-<consumer|producer|restore-consumer|global-consumer></code>.";
/** {@code commit.interval.ms} */
@SuppressWarnings("WeakerAccess")
public static final String COMMIT_INTERVAL_MS_CONFIG = "commit.interval.ms";
private static final String COMMIT_INTERVAL_MS_DOC = "The frequency in milliseconds with which to commit processing progress." +
" For at-least-once processing, committing means to save the position (ie, offsets) of the processor." +
" For exactly-once processing, it means to commit the transaction which includes to save the position and to make the committed data in the output topic visible to consumers with isolation level read_committed." +
" (Note, if <code>processing.guarantee</code> is set to <code>" + EXACTLY_ONCE_V2 + "</code>, the default value is <code>" + EOS_DEFAULT_COMMIT_INTERVAL_MS + "</code>," +
" otherwise the default value is <code>" + DEFAULT_COMMIT_INTERVAL_MS + "</code>.";
/** {@code connections.max.idle.ms} */
@SuppressWarnings("WeakerAccess")
public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG;
/** {@code default.client.supplier} */
@SuppressWarnings("WeakerAccess")
public static final String DEFAULT_CLIENT_SUPPLIER_CONFIG = "default.client.supplier";
@Deprecated
public static final String DEFAULT_CLIENT_SUPPLIER_DOC = "Client supplier
|
StreamsConfig
|
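The prefix constants above translate directly into property keys. A minimal sketch of how the documented precedence (`main.consumer.*` over `consumer.*` over the bare name) is expressed when assembling a Streams configuration; `max.poll.records` is just an example consumer property, and `consumerPrefix` is the helper the record's Javadoc recommends.

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class PrefixedConfigSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "demo-app");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        // Applies to all consumers (main, restore, global):
        props.put(StreamsConfig.consumerPrefix("max.poll.records"), "500");

        // Overrides the above for the main consumer only, per the
        // documented precedence: main.consumer.* > consumer.* > bare name.
        props.put(StreamsConfig.MAIN_CONSUMER_PREFIX + "max.poll.records", "100");
    }
}
```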
java
|
spring-projects__spring-boot
|
module/spring-boot-amqp/src/main/java/org/springframework/boot/amqp/autoconfigure/AbstractConnectionFactoryConfigurer.java
|
{
"start": 1289,
"end": 4043
}
|
class ____<T extends AbstractConnectionFactory> {
private final RabbitProperties rabbitProperties;
private @Nullable ConnectionNameStrategy connectionNameStrategy;
private final RabbitConnectionDetails connectionDetails;
/**
* Creates a new configurer that will configure the connection factory using the given
* {@code properties}.
* @param properties the properties to use to configure the connection factory
*/
protected AbstractConnectionFactoryConfigurer(RabbitProperties properties) {
this(properties, new PropertiesRabbitConnectionDetails(properties, null));
}
/**
* Creates a new configurer that will configure the connection factory using the given
* {@code properties} and {@code connectionDetails}, with the latter taking priority.
* @param properties the properties to use to configure the connection factory
* @param connectionDetails the connection details to use to configure the connection
* factory
*/
protected AbstractConnectionFactoryConfigurer(RabbitProperties properties,
RabbitConnectionDetails connectionDetails) {
Assert.notNull(properties, "'properties' must not be null");
Assert.notNull(connectionDetails, "'connectionDetails' must not be null");
this.rabbitProperties = properties;
this.connectionDetails = connectionDetails;
}
protected final @Nullable ConnectionNameStrategy getConnectionNameStrategy() {
return this.connectionNameStrategy;
}
public final void setConnectionNameStrategy(@Nullable ConnectionNameStrategy connectionNameStrategy) {
this.connectionNameStrategy = connectionNameStrategy;
}
/**
* Configures the given {@code connectionFactory} with sensible defaults.
* @param connectionFactory connection factory to configure
*/
public final void configure(T connectionFactory) {
Assert.notNull(connectionFactory, "'connectionFactory' must not be null");
PropertyMapper map = PropertyMapper.get();
String addresses = this.connectionDetails.getAddresses()
.stream()
.map((address) -> address.host() + ":" + address.port())
.collect(Collectors.joining(","));
map.from(addresses).to(connectionFactory::setAddresses);
map.from(this.rabbitProperties::getAddressShuffleMode).to(connectionFactory::setAddressShuffleMode);
map.from(this.connectionNameStrategy).to(connectionFactory::setConnectionNameStrategy);
configure(connectionFactory, this.rabbitProperties);
}
/**
* Configures the given {@code connectionFactory} using the given
* {@code rabbitProperties}.
* @param connectionFactory connection factory to configure
* @param rabbitProperties properties to use for the configuration
*/
protected abstract void configure(T connectionFactory, RabbitProperties rabbitProperties);
}
|
AbstractConnectionFactoryConfigurer
|
java
|
spring-projects__spring-framework
|
spring-messaging/src/test/java/org/springframework/messaging/simp/config/MessageBrokerConfigurationTests.java
|
{
"start": 25070,
"end": 25495
}
|
class ____ extends SimpleBrokerConfig {
@Override
public void configureMessageBroker(MessageBrokerRegistry registry) {
registry.enableStompBrokerRelay("/topic", "/queue")
.setAutoStartup(true)
.setTcpClient(new NoOpTcpClient())
.setUserDestinationBroadcast("/topic/unresolved-user-destination")
.setUserRegistryBroadcast("/topic/simp-user-registry");
}
}
@Configuration
static
|
BrokerRelayConfig
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/identity/HANAIdentityColumnSupport.java
|
{
"start": 182,
"end": 751
}
|
class ____ extends IdentityColumnSupportImpl {
public static final HANAIdentityColumnSupport INSTANCE = new HANAIdentityColumnSupport();
@Override
public boolean supportsIdentityColumns() {
return true;
}
@Override
public String getIdentitySelectString(String table, String column, int type) throws MappingException {
return "select current_identity_value() from " + table;
}
@Override
public String getIdentityColumnString(int type) {
// implicitly start with 1 increment by 1
return "generated by default as identity";
}
}
|
HANAIdentityColumnSupport
|
java
|
spring-projects__spring-boot
|
core/spring-boot/src/test/java/org/springframework/boot/context/properties/ConfigurationPropertiesTests.java
|
{
"start": 98656,
"end": 98990
}
|
class ____ {
private final CustomList<String> values;
ConstructorBoundCustomListProperties(CustomList<String> values) {
this.values = values;
}
CustomList<String> getValues() {
return this.values;
}
}
@EnableConfigurationProperties(SetterBoundCustomListProperties.class)
static
|
ConstructorBoundCustomListProperties
|
java
|
apache__camel
|
core/camel-management/src/test/java/org/apache/camel/management/ManagedStatisticsLevelOffTest.java
|
{
"start": 1331,
"end": 2974
}
|
class ____ extends ManagementTestSupport {
@Override
protected CamelContext createCamelContext() throws Exception {
CamelContext context = super.createCamelContext();
// disable it by default
context.getManagementStrategy().getManagementAgent().setStatisticsLevel(ManagementStatisticsLevel.Off);
return context;
}
@Test
public void testManageStatisticsLevelDisabled() throws Exception {
template.sendBody("direct:start", "Hello World");
template.sendBody("direct:start", "Bye World");
// get the stats for the route
MBeanServer mbeanServer = getMBeanServer();
Set<ObjectName> set = mbeanServer.queryNames(new ObjectName("*:type=routes,*"), null);
assertEquals(1, set.size());
ObjectName on = set.iterator().next();
// use route to get the total time
Long completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(0, completed.longValue());
// enable statistics
mbeanServer.setAttribute(on, new Attribute("StatisticsEnabled", true));
// send in another message
template.sendBody("direct:start", "Goodday World");
// should be 1
completed = (Long) mbeanServer.getAttribute(on, "ExchangesCompleted");
assertEquals(1, completed.longValue());
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").to("mock:result");
}
};
}
}
|
ManagedStatisticsLevelOffTest
|
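The test's MBean access pattern (build an `ObjectName`, query the server, read attributes) works against any `MBeanServer`. A self-contained version against the JVM's platform server, using a standard runtime MBean instead of Camel's route MBeans:

```java
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxQuerySketch {
    public static void main(String[] args) throws Exception {
        MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
        // Same lookup shape as the test: an ObjectName, then getAttribute calls.
        ObjectName on = new ObjectName("java.lang:type=Runtime");
        String vmName = (String) mbeanServer.getAttribute(on, "VmName");
        Long uptime = (Long) mbeanServer.getAttribute(on, "Uptime");
        System.out.println(vmName + " up " + uptime + " ms");
    }
}
```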
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/issue_1100/Issue1150.java
|
{
"start": 648,
"end": 718
}
|
class ____ {
public Item[] values;
}
public static
|
Model2
|
java
|
apache__kafka
|
metadata/src/test/java/org/apache/kafka/controller/ReplicationControlManagerTest.java
|
{
"start": 9885,
"end": 10106
}
|
class ____ {
private static final Logger log = LoggerFactory.getLogger(ReplicationControlManagerTest.class);
private static final int BROKER_SESSION_TIMEOUT_MS = 1000;
private static
|
ReplicationControlManagerTest
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/collectionincompatibletype/TruthIncompatibleTypeTest.java
|
{
"start": 4158,
"end": 4571
}
|
class ____ {
public void f() {
assertThat(Byte.valueOf((byte) 2)).isEqualTo(2);
}
}
""")
.doTest();
}
@Test
public void chainedThrowAssertion_noMatch() {
compilationHelper
.addSourceLines(
"Test.java",
"""
import static com.google.common.truth.Truth.assertThat;
public
|
Test
|
java
|
playframework__playframework
|
web/play-java-forms/src/main/java/play/data/validation/Constraints.java
|
{
"start": 24695,
"end": 25027
}
|
interface ____ {
String message() default "error.invalid";
Class<?>[] groups() default {};
Class<? extends Payload>[] payload() default {};
/** Defines several {@code @ValidateWithPayload} annotations on the same element. */
@Target({TYPE, ANNOTATION_TYPE})
@Retention(RUNTIME)
public @
|
ValidateWithPayload
|
java
|
apache__flink
|
flink-table/flink-table-runtime/src/test/java/org/apache/flink/table/runtime/util/GenericRowRecordSortComparator.java
|
{
"start": 1121,
"end": 1243
}
|
class ____ compare two GenericRowData based on sortKey value. Note: Only support sortKey
* is Comparable value.
*/
public
|
to
|
java
|
apache__logging-log4j2
|
log4j-core/src/main/java/org/apache/logging/log4j/core/appender/rolling/action/CommonsCompressAction.java
|
{
"start": 1330,
"end": 5685
}
|
class ____ extends AbstractAction {
private static final int BUF_SIZE = 8192;
/**
* Compressor name. One of "gz", "bzip2", "xz", "zst", "pack200" or "deflate".
*/
private final String name;
/**
* Source file.
*/
private final File source;
/**
* Destination file.
*/
private final File destination;
/**
* If true, attempt to delete file on completion.
*/
private final boolean deleteSource;
/**
* Creates new instance of Bzip2CompressAction.
*
* @param name the compressor name. One of "gz", "bzip2", "xz", "zst", "pack200", or "deflate".
* @param source file to compress, may not be null.
* @param destination compressed file, may not be null.
* @param deleteSource if true, attempt to delete file on completion. Failure to delete does not cause an exception
* to be thrown or affect return value.
*/
public CommonsCompressAction(
final String name, final File source, final File destination, final boolean deleteSource) {
Objects.requireNonNull(source, "source");
Objects.requireNonNull(destination, "destination");
this.name = name;
this.source = source;
this.destination = destination;
this.deleteSource = deleteSource;
}
/**
* Compresses.
*
* @return true if successfully compressed.
* @throws IOException on IO exception.
*/
@Override
public boolean execute() throws IOException {
return execute(name, source, destination, deleteSource);
}
/**
* Compresses a file.
*
* @param name the compressor name, i.e. "gz", "bzip2", "xz", "zstd", "pack200", or "deflate".
* @param source file to compress, may not be null.
* @param destination compressed file, may not be null.
* @param deleteSource if true, attempt to delete file on completion. Failure to delete does not cause an exception
* to be thrown or affect return value.
*
* @return true if source file compressed.
* @throws IOException on IO exception.
*/
public static boolean execute(
final String name, final File source, final File destination, final boolean deleteSource)
throws IOException {
if (!source.exists()) {
return false;
}
LOGGER.debug("Starting {} compression of {}", name, source.getPath());
try (final FileInputStream input = new FileInputStream(source);
final FileOutputStream fileOutput = new FileOutputStream(destination);
final BufferedOutputStream output = new BufferedOutputStream(
new CompressorStreamFactory().createCompressorOutputStream(name, fileOutput))) {
IOUtils.copy(input, output, BUF_SIZE);
LOGGER.debug("Finished {} compression of {}", name, source.getPath());
} catch (final CompressorException e) {
throw new IOException(e);
}
if (deleteSource) {
try {
if (Files.deleteIfExists(source.toPath())) {
LOGGER.debug("Deleted {}", source.toString());
} else {
LOGGER.warn(
"Unable to delete {} after {} compression. File did not exist", source.toString(), name);
}
} catch (final Exception ex) {
LOGGER.warn("Unable to delete {} after {} compression, {}", source.toString(), name, ex.getMessage());
}
}
return true;
}
/**
* Reports exception.
*
* @param ex exception.
*/
@Override
protected void reportException(final Exception ex) {
LOGGER.warn("Exception during " + name + " compression of '" + source.toString() + "'.", ex);
}
@Override
public String toString() {
return CommonsCompressAction.class.getSimpleName() + '[' + source + " to " + destination + ", deleteSource="
+ deleteSource + ']';
}
public String getName() {
return name;
}
public File getSource() {
return source;
}
public File getDestination() {
return destination;
}
public boolean isDeleteSource() {
return deleteSource;
}
}
|
CommonsCompressAction
|
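Because `execute` is public and static, the action can be driven directly, without the rolling-file machinery. A usage sketch grounded in the signature shown above; the file paths are hypothetical.

```java
import java.io.File;
import java.io.IOException;
import org.apache.logging.log4j.core.appender.rolling.action.CommonsCompressAction;

public class CompressSketch {
    public static void main(String[] args) throws IOException {
        // "gz" is one of the compressor names listed in the class Javadoc;
        // execute returns false if the source file does not exist, and
        // deleteSource=true removes the original after compression.
        boolean ok = CommonsCompressAction.execute(
                "gz", new File("app.log.1"), new File("app.log.1.gz"), true);
        System.out.println("compressed: " + ok);
    }
}
```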
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/mapping/FilterConfiguration.java
|
{
"start": 370,
"end": 2233
}
|
class ____ {
private final String name;
private final String condition;
private final boolean autoAliasInjection;
private final Map<String, String> aliasTableMap;
private final Map<String, String> aliasEntityMap;
private final PersistentClass persistentClass;
public FilterConfiguration(
String name,
String condition,
boolean autoAliasInjection,
Map<String, String> aliasTableMap,
Map<String, String> aliasEntityMap,
PersistentClass persistentClass) {
this.name = name;
this.condition = condition;
this.autoAliasInjection = autoAliasInjection;
this.aliasTableMap = aliasTableMap;
this.aliasEntityMap = aliasEntityMap;
this.persistentClass = persistentClass;
}
public String getName() {
return name;
}
public String getCondition() {
return condition;
}
public boolean useAutoAliasInjection() {
return autoAliasInjection;
}
public Map<String, String> getAliasTableMap(SessionFactoryImplementor factory) {
final var mergedAliasTableMap = mergeAliasMaps( factory );
if ( !mergedAliasTableMap.isEmpty() ) {
return mergedAliasTableMap;
}
else if ( persistentClass != null ) {
final String tableName =
persistentClass.getTable()
.getQualifiedName( factory.getSqlStringGenerationContext() );
return singletonMap( null, tableName );
}
else {
return emptyMap();
}
}
private Map<String, String> mergeAliasMaps(SessionFactoryImplementor factory) {
final Map<String, String> result = new HashMap<>();
if ( aliasTableMap != null ) {
result.putAll( aliasTableMap );
}
if ( aliasEntityMap != null ) {
for ( var entry : aliasEntityMap.entrySet() ) {
final var joinable =
factory.getMappingMetamodel()
.getEntityDescriptor( entry.getValue() );
result.put( entry.getKey(), joinable.getTableName() );
}
}
return result;
}
}
|
FilterConfiguration
|
java
|
google__dagger
|
dagger-producers/main/java/dagger/producers/monitoring/internal/Monitors.java
|
{
"start": 1156,
"end": 3112
}
|
class ____ {
private static final Logger logger = Logger.getLogger(Monitors.class.getName());
/**
* Returns a monitor factory that delegates to the given factories, and ensures that any method
* called on this object, even transitively, does not throw a {@link RuntimeException} or return
* null.
*
* <p>If the delegate monitors throw an {@link Error}, then that will escape this monitor
* implementation. Errors are treated as unrecoverable conditions, and may cause the entire
* component's execution to fail.
*/
public static ProductionComponentMonitor.Factory delegatingProductionComponentMonitorFactory(
Collection<? extends ProductionComponentMonitor.Factory> factories) {
if (factories.isEmpty()) {
return ProductionComponentMonitor.Factory.noOp();
} else if (factories.size() == 1) {
return new NonThrowingProductionComponentMonitor.Factory(Iterables.getOnlyElement(factories));
} else {
return new DelegatingProductionComponentMonitor.Factory(factories);
}
}
/**
* Creates a new monitor for the given component, from a set of monitor factories. This will not
* throw a {@link RuntimeException} or return null.
*/
public static ProductionComponentMonitor createMonitorForComponent(
Provider<?> componentProvider,
Provider<Set<ProductionComponentMonitor.Factory>> monitorFactorySetProvider) {
try {
ProductionComponentMonitor.Factory factory =
delegatingProductionComponentMonitorFactory(monitorFactorySetProvider.get());
return factory.create(componentProvider.get());
} catch (RuntimeException e) {
logger.log(Level.SEVERE, "RuntimeException while constructing monitor factories.", e);
return ProductionComponentMonitor.noOp();
}
}
/**
* A component monitor that delegates to a single monitor, and catches and logs all exceptions
* that the delegate throws.
*/
private static final
|
Monitors
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/runtime/checkpoint/SubtaskStateStatsTest.java
|
{
"start": 1005,
"end": 2968
}
|
class ____ {
/** Tests simple access via the getters. */
@Test
void testSimpleAccess() throws Exception {
test(false);
}
/** Tests that the snapshot is actually serializable. */
@Test
void testIsJavaSerializable() throws Exception {
test(true);
}
public void test(boolean serialize) throws Exception {
SubtaskStateStats stats =
new SubtaskStateStats(
0,
Integer.MAX_VALUE + 1L,
Integer.MAX_VALUE + 2L,
Integer.MAX_VALUE + 2L,
Integer.MAX_VALUE + 3L,
Integer.MAX_VALUE + 4L,
Integer.MAX_VALUE + 8L,
Integer.MAX_VALUE + 9L,
Integer.MAX_VALUE + 6L,
Integer.MAX_VALUE + 7L,
false,
true);
stats = serialize ? CommonTestUtils.createCopySerializable(stats) : stats;
assertThat(stats.getSubtaskIndex()).isZero();
assertThat(stats.getAckTimestamp()).isEqualTo(Integer.MAX_VALUE + 1L);
assertThat(stats.getStateSize()).isEqualTo(Integer.MAX_VALUE + 2L);
assertThat(stats.getSyncCheckpointDuration()).isEqualTo(Integer.MAX_VALUE + 3L);
assertThat(stats.getAsyncCheckpointDuration()).isEqualTo(Integer.MAX_VALUE + 4L);
assertThat(stats.getAlignmentDuration()).isEqualTo(Integer.MAX_VALUE + 6L);
assertThat(stats.getCheckpointStartDelay()).isEqualTo(Integer.MAX_VALUE + 7L);
// Check duration helper
long ackTimestamp = stats.getAckTimestamp();
long triggerTimestamp = ackTimestamp - 10123;
assertThat(stats.getEndToEndDuration(triggerTimestamp)).isEqualTo(10123);
// Trigger timestamp < ack timestamp
assertThat(stats.getEndToEndDuration(ackTimestamp + 1)).isZero();
}
}
|
SubtaskStateStatsTest
|
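The SubtaskStateStatsTest record above round-trips the stats object through Java serialization via CommonTestUtils.createCopySerializable. A minimal stand-in for such a helper, assuming nothing beyond java.io, looks like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

final class SerializableCopy {
    @SuppressWarnings("unchecked")
    static <T extends Serializable> T copy(T value) throws IOException, ClassNotFoundException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        // Serialize to an in-memory buffer ...
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(value);
        }
        // ... then deserialize a fresh, independent copy from it.
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            return (T) in.readObject();
        }
    }

    private SerializableCopy() {
    }
}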
java
|
grpc__grpc-java
|
alts/src/generated/main/grpc/io/grpc/alts/internal/HandshakerServiceGrpc.java
|
{
"start": 4865,
"end": 5692
}
|
interface ____ {
/**
* <pre>
* Handshaker service accepts a stream of handshaker request, returning a
* stream of handshaker response. Client is expected to send exactly one
* message with either client_start or server_start followed by one or more
* messages with next. Each time client sends a request, the handshaker
* service expects to respond. Client does not have to wait for service's
* response before sending next request.
* </pre>
*/
default io.grpc.stub.StreamObserver<io.grpc.alts.internal.HandshakerReq> doHandshake(
io.grpc.stub.StreamObserver<io.grpc.alts.internal.HandshakerResp> responseObserver) {
return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall(getDoHandshakeMethod(), responseObserver);
}
}
/**
* Base
|
AsyncService
|
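The AsyncService record above declares gRPC's bidirectional-streaming contract for the ALTS handshaker. Below is a hedged, generic sketch of how such a method is typically implemented on the server side: return an observer for inbound requests and emit responses as they arrive. Request, Response, and the handle function are stand-ins, not the generated ALTS types.

import io.grpc.stub.StreamObserver;

final class StreamingSketch {
    static <Request, Response> StreamObserver<Request> doHandshake(
            StreamObserver<Response> responseObserver,
            java.util.function.Function<Request, Response> handle) {
        return new StreamObserver<Request>() {
            @Override
            public void onNext(Request request) {
                // Respond to each request as it arrives; the client need not
                // wait for a response before sending the next request.
                responseObserver.onNext(handle.apply(request));
            }

            @Override
            public void onError(Throwable t) {
                responseObserver.onError(t);
            }

            @Override
            public void onCompleted() {
                responseObserver.onCompleted();
            }
        };
    }
}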
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/query/sqm/tree/expression/SqmCoalesce.java
|
{
"start": 939,
"end": 4570
}
|
class ____<T> extends AbstractSqmExpression<T> implements JpaCoalesce<T> {
private final SqmFunctionDescriptor functionDescriptor;
private final List<SqmExpression<? extends T>> arguments;
public SqmCoalesce(NodeBuilder nodeBuilder) {
this( null, nodeBuilder );
}
public SqmCoalesce(@Nullable SqmBindableType<T> type, NodeBuilder nodeBuilder) {
super( type, nodeBuilder );
functionDescriptor = nodeBuilder.getQueryEngine().getSqmFunctionRegistry().getFunctionDescriptor( "coalesce" );
this.arguments = new ArrayList<>();
}
public SqmCoalesce(@Nullable SqmBindableType<T> type, int numberOfArguments, NodeBuilder nodeBuilder) {
super( type, nodeBuilder );
functionDescriptor = nodeBuilder.getQueryEngine().getSqmFunctionRegistry().getFunctionDescriptor( "coalesce" );
this.arguments = new ArrayList<>( numberOfArguments );
}
@Override
public SqmCoalesce<T> copy(SqmCopyContext context) {
final SqmCoalesce<T> existing = context.getCopy( this );
if ( existing != null ) {
return existing;
}
final SqmCoalesce<T> coalesce = context.registerCopy(
this,
new SqmCoalesce<>(
getNodeType(),
arguments.size(),
nodeBuilder()
)
);
for ( SqmExpression<? extends T> argument : arguments ) {
coalesce.arguments.add( argument.copy( context ) );
}
copyTo( coalesce, context );
return coalesce;
}
public SqmFunctionDescriptor getFunctionDescriptor() {
return functionDescriptor;
}
public void value(SqmExpression<? extends T> expression) {
arguments.add( expression );
}
public List<SqmExpression<? extends T>> getArguments() {
return arguments;
}
@Override
public <X> X accept(SemanticQueryWalker<X> walker) {
return walker.visitCoalesce( this );
}
@Override
public String asLoggableText() {
return "coalesce(...)";
}
@Override
public void appendHqlString(StringBuilder hql, SqmRenderContext context) {
hql.append( "coalesce(" );
arguments.get( 0 ).appendHqlString( hql, context );
for ( int i = 1; i < arguments.size(); i++ ) {
hql.append(", ");
arguments.get( i ).appendHqlString( hql, context );
}
hql.append( ')' );
}
@Override
public boolean equals(@Nullable Object object) {
return object instanceof SqmCoalesce<?> that
&& Objects.equals( this.arguments, that.arguments );
}
@Override
public int hashCode() {
return Objects.hashCode( arguments );
}
@Override
public boolean isCompatible(Object object) {
return object instanceof SqmCoalesce<?> that
&& SqmCacheable.areCompatible( this.arguments, that.arguments );
}
@Override
public int cacheHashCode() {
return SqmCacheable.cacheHashCode( arguments );
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// JPA
@Override
public SqmCoalesce<T> value(@Nullable T value) {
value( nodeBuilder().value( value, firstOrNull() ) );
return this;
}
private @Nullable SqmExpression<T> firstOrNull() {
if ( isEmpty( arguments ) ) {
return null;
}
else {
//noinspection unchecked
return (SqmExpression<T>) arguments.get( 0 );
}
}
@Override
public SqmCoalesce<T> value(Expression<? extends T> value) {
value( (SqmExpression<? extends T>) value );
return this;
}
@Override
public SqmCoalesce<T> value(JpaExpression<? extends T> value) {
//noinspection unchecked
value( (SqmExpression<T>) value );
return this;
}
@Override
@SuppressWarnings("unchecked")
public SqmCoalesce<T> values(T... values) {
final SqmExpression<T> firstOrNull = firstOrNull();
for ( T value : values ) {
value( nodeBuilder().value( value, firstOrNull ) );
}
return this;
}
}
|
SqmCoalesce
|
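The SqmCoalesce record above is the SQM tree node behind JPA's Criteria coalesce(). A hedged usage sketch follows, assuming the jakarta.persistence Criteria API and a hypothetical Person entity with nickname/name attributes.

import jakarta.persistence.Entity;
import jakarta.persistence.EntityManager;
import jakarta.persistence.Id;
import jakarta.persistence.criteria.CriteriaBuilder;
import jakarta.persistence.criteria.CriteriaQuery;
import jakarta.persistence.criteria.Root;

@Entity
class Person {
    @Id
    Long id;
    String nickname;
    String name;
}

final class CoalesceExample {
    static CriteriaQuery<String> displayName(EntityManager em) {
        CriteriaBuilder cb = em.getCriteriaBuilder();
        CriteriaQuery<String> query = cb.createQuery(String.class);
        Root<Person> person = query.from(Person.class);
        // Each value(...) call appends an argument; evaluation yields the
        // first non-null one, which SqmCoalesce renders as coalesce(...).
        CriteriaBuilder.Coalesce<String> displayName = cb.<String>coalesce()
                .value(person.<String>get("nickname"))
                .value(person.<String>get("name"))
                .value("unknown");
        return query.select(displayName);
    }
}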
java
|
assertj__assertj-core
|
assertj-tests/assertj-integration-tests/assertj-core-tests/src/test/java/org/assertj/tests/core/api/Assertions_assertThat_with_InputStream_Test.java
|
{
"start": 986,
"end": 1362
}
|
class ____ {
private static InputStream actual;
@BeforeAll
static void setUpOnce() {
actual = new ByteArrayInputStream(new byte[0]);
}
@Test
void should_create_Assert() {
AbstractInputStreamAssert<?, ? extends InputStream> assertions = Assertions.assertThat(actual);
assertThat(assertions).isNotNull();
}
}
|
Assertions_assertThat_with_InputStream_Test
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/mapping/mutability/attribute/MutableMapAsBasicTests.java
|
{
"start": 3424,
"end": 3716
}
|
class ____ {
@Id
private Integer id;
@Convert( converter = MapConverter.class )
private Map<String,String> data;
private TestEntity() {
// for use by Hibernate
}
public TestEntity(Integer id, Map<String,String> data) {
this.id = id;
this.data = data;
}
}
}
|
TestEntity
|
java
|
apache__camel
|
tooling/maven/camel-package-maven-plugin/src/it/HeaderSupport/src/main/java/org/apache/camel/component/foo/FooEndpoint.java
|
{
"start": 1267,
"end": 2154
}
|
class ____ {
@UriPath(description = "Hostname of the Foo server")
@Metadata(required = true)
private String host;
@UriPath(description = "Port of the Foo server")
private int port;
@UriParam(label = "common", defaultValue = "5")
private int intervalSeconds = 5;
public int getIntervalSeconds() {
return intervalSeconds;
}
/**
* My interval in seconds.
*/
public void setIntervalSeconds(int intervalSeconds) {
this.intervalSeconds = intervalSeconds;
}
public String getHost() {
return host;
}
/**
* Hostname of the Foo server
*/
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
/**
* Port of the Foo server
*/
public void setPort(int port) {
this.port = port;
}
}
|
FooEndpoint
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/orphan/one2one/fk/composite/EmployeeInfo.java
|
{
"start": 282,
"end": 1478
}
|
class ____ implements Serializable {
private Long companyId;
private Long personId;
public Id() {
}
public Id(Long companyId, Long personId) {
this.companyId = companyId;
this.personId = personId;
}
public Long getCompanyId() {
return companyId;
}
public void setCompanyId(Long companyId) {
this.companyId = companyId;
}
public Long getPersonId() {
return personId;
}
public void setPersonId(Long personId) {
this.personId = personId;
}
@Override
public boolean equals(Object o) {
if ( this == o ) {
return true;
}
if ( o == null || getClass() != o.getClass() ) {
return false;
}
Id id = (Id) o;
return companyId.equals( id.companyId )
&& personId.equals( id.personId );
}
@Override
public int hashCode() {
int result = companyId.hashCode();
result = 31 * result + personId.hashCode();
return result;
}
}
private Id id;
public EmployeeInfo() {
}
public EmployeeInfo(Long companyId, Long personId) {
this( new Id( companyId, personId ) );
}
public EmployeeInfo(Id id) {
this.id = id;
}
public Id getId() {
return id;
}
public void setId(Id id) {
this.id = id;
}
}
|
Id
|
java
|
quarkusio__quarkus
|
integration-tests/main/src/test/java/io/quarkus/it/main/QuarkusTestNestedWithResourcesTestCase.java
|
{
"start": 1238,
"end": 1858
}
|
class ____ {
public static final AtomicInteger COUNTER = new AtomicInteger(0);
public static final AtomicInteger COUNT_RESOURCE_STARTS = new AtomicInteger(0);
@InjectDummyString
String bar;
@Test
@Order(1)
public void testBarFromOuter() {
Assertions.assertEquals("bar", bar);
COUNTER.incrementAndGet();
}
@Test
@Order(2)
public void testResourceShouldNotHaveBeenRestarted() {
Assertions.assertEquals(1, COUNT_RESOURCE_STARTS.get());
}
@Nested
@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
|
QuarkusTestNestedWithResourcesTestCase
|
java
|
spring-projects__spring-framework
|
spring-core/src/test/java/org/springframework/core/GenericTypeResolverTests.java
|
{
"start": 13423,
"end": 13446
}
|
class ____ extends A {}
|
C
|
java
|
apache__camel
|
components/camel-slack/src/main/java/org/apache/camel/component/slack/SlackConstants.java
|
{
"start": 871,
"end": 2325
}
|
class ____ {
public static final String SLACK_USERNAME_FIELD = "username";
public static final String SLACK_TEXT_FIELD = "text";
public static final String SLACK_CHANNEL_FIELD = "channel";
public static final String SLACK_ICON_URL_FIELD = "icon_url";
public static final String SLACK_ICON_EMOJI_FIELD = "icon_emoji";
public static final String SLACK_ATTACHMENTS_FIELD = "attachments";
public static final String SLACK_ATTACHMENT_FALLBACK_FIELD = "fallback";
public static final String SLACK_ATTACHMENT_COLOR_FIELD = "color";
public static final String SLACK_ATTACHMENT_TEXT_FIELD = "text";
public static final String SLACK_ATTACHMENT_PRETEXT_FIELD = "pretext";
public static final String SLACK_ATTACHMENT_AUTHOR_NAME_FIELD = "author_name";
public static final String SLACK_ATTACHMENT_AUTHOR_LINK_FIELD = "author_link";
public static final String SLACK_ATTACHMENT_AUTHOR_ICON_FIELD = "author_icon";
public static final String SLACK_ATTACHMENT_TITLE_FIELD = "title";
public static final String SLACK_ATTACHMENT_TITLE_LINK_FIELD = "title_link";
public static final String SLACK_ATTACHMENT_IMAGE_URL_FIELD = "image_url";
public static final String SLACK_ATTACHMENT_FOOTER_FIELD = "footer";
public static final String SLACK_ATTACHMENT_FOOTER_ICON_FIELD = "footer_icon";
public static final String SLACK_ATTACHMENT_TS_FIELD = "ts";
private SlackConstants() {
}
}
|
SlackConstants
|
java
|
alibaba__fastjson
|
src/test/java/com/alibaba/json/bvt/asm/ASMDeserTest.java
|
{
"start": 1508,
"end": 1645
}
|
class ____ extends ArrayList<String> {
public EntityError(){
throw new RuntimeException();
}
}
}
|
EntityError
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/context/aot/samples/basic/DisabledInAotProcessingTests.java
|
{
"start": 2135,
"end": 2426
}
|
class ____ {
@Test
void disabledInAotMode(@Autowired String enigma) {
assertThat(AotDetector.useGeneratedArtifacts()).as("Should be disabled in AOT mode").isFalse();
assertThat(enigma).isEqualTo("puzzle");
}
@Configuration(proxyBeanMethods = false)
static
|
DisabledInAotProcessingTests
|
java
|
google__guava
|
android/guava-tests/benchmark/com/google/common/util/concurrent/ExecutionListBenchmark.java
|
{
"start": 15855,
"end": 16145
}
|
class ____ {
final Runnable runnable;
final Executor executor;
@Nullable RunnableExecutorPair next;
RunnableExecutorPair(Runnable runnable, Executor executor) {
this.runnable = runnable;
this.executor = executor;
}
}
}
}
|
RunnableExecutorPair
|
java
|
junit-team__junit5
|
platform-tests/src/test/java/org/junit/platform/commons/util/ReflectionUtilsWithGenericTypeHierarchiesTests.java
|
{
"start": 5268,
"end": 5463
}
|
class ____
implements InterfaceWithGenericObjectParameter {
public void foo(@SuppressWarnings("unused") Number a) {
}
}
public static
|
ClassImplementingGenericInterfaceWithMoreSpecificMethod
|
java
|
spring-projects__spring-boot
|
module/spring-boot-micrometer-tracing-opentelemetry/src/test/java/org/springframework/boot/micrometer/tracing/opentelemetry/autoconfigure/OpenTelemetryTracingAutoConfigurationTests.java
|
{
"start": 20911,
"end": 21535
}
|
class ____ {
@Bean
@Order(1)
SdkTracerProviderBuilderCustomizer sdkTracerProviderBuilderCustomizerOne() {
return (builder) -> {
SpanLimits spanLimits = SpanLimits.builder().setMaxNumberOfEvents(42).build();
builder.setSpanLimits(spanLimits);
};
}
@Bean
@Order(0)
SdkTracerProviderBuilderCustomizer sdkTracerProviderBuilderCustomizerTwo() {
return (builder) -> {
SpanLimits spanLimits = SpanLimits.builder().setMaxNumberOfEvents(21).build();
builder.setSpanLimits(spanLimits).setSampler(Sampler.alwaysOn());
};
}
}
private static final
|
SdkTracerProviderCustomizationConfiguration
|
java
|
apache__dubbo
|
dubbo-compatible/src/test/java/org/apache/dubbo/service/MockInvocation.java
|
{
"start": 1600,
"end": 5128
}
|
class ____ implements Invocation {
private String arg0;
private Map<String, Object> attachments;
public MockInvocation(String arg0) {
this.arg0 = arg0;
attachments = new HashMap<>();
attachments.put(PATH_KEY, "dubbo");
attachments.put(GROUP_KEY, "dubbo");
attachments.put(VERSION_KEY, "1.0.0");
attachments.put(DUBBO_VERSION_KEY, "1.0.0");
attachments.put(TOKEN_KEY, "sfag");
attachments.put(TIMEOUT_KEY, "1000");
}
@Override
public String getTargetServiceUniqueName() {
return null;
}
@Override
public String getProtocolServiceKey() {
return null;
}
public String getMethodName() {
return "echo";
}
@Override
public String getServiceName() {
return "DemoService";
}
public Class<?>[] getParameterTypes() {
return new Class[] {String.class};
}
public Object[] getArguments() {
return new Object[] {arg0};
}
public Map<String, String> getAttachments() {
return new AttachmentsAdapter.ObjectToStringMap(attachments);
}
@Override
public Map<String, Object> getObjectAttachments() {
return attachments;
}
@Override
public Map<String, Object> copyObjectAttachments() {
return new HashMap<>(attachments);
}
@Override
public void foreachAttachment(Consumer<Map.Entry<String, Object>> consumer) {
attachments.entrySet().forEach(consumer);
}
@Override
public void setAttachment(String key, String value) {
setObjectAttachment(key, value);
}
@Override
public void setAttachment(String key, Object value) {
setObjectAttachment(key, value);
}
@Override
public void setObjectAttachment(String key, Object value) {
attachments.put(key, value);
}
@Override
public void setAttachmentIfAbsent(String key, String value) {
setObjectAttachmentIfAbsent(key, value);
}
@Override
public void setAttachmentIfAbsent(String key, Object value) {
setObjectAttachmentIfAbsent(key, value);
}
@Override
public void setObjectAttachmentIfAbsent(String key, Object value) {
attachments.put(key, value);
}
public Invoker<?> getInvoker() {
return null;
}
@Override
public Object put(Object key, Object value) {
return null;
}
@Override
public Object get(Object key) {
return null;
}
@Override
public void setServiceModel(ServiceModel serviceModel) {}
@Override
public ServiceModel getServiceModel() {
return null;
}
@Override
public Map<Object, Object> getAttributes() {
return null;
}
public String getAttachment(String key) {
return (String) getObjectAttachments().get(key);
}
@Override
public Object getObjectAttachment(String key) {
return attachments.get(key);
}
    public String getAttachment(String key, String defaultValue) {
        String value = (String) getObjectAttachments().get(key);
        return value == null ? defaultValue : value;
    }
@Override
public Object getObjectAttachment(String key, Object defaultValue) {
Object result = attachments.get(key);
if (result == null) {
return defaultValue;
}
return result;
}
@Override
public void addInvokedInvoker(Invoker<?> invoker) {}
@Override
public List<Invoker<?>> getInvokedInvokers() {
return null;
}
}
|
MockInvocation
|
java
|
elastic__elasticsearch
|
modules/runtime-fields-common/src/yamlRestTest/java/org/elasticsearch/painless/RuntimeFieldsClientYamlTestSuiteIT.java
|
{
"start": 896,
"end": 1516
}
|
class ____ extends ESClientYamlSuiteTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local().module("runtime-fields-common").build();
public RuntimeFieldsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
}
|
RuntimeFieldsClientYamlTestSuiteIT
|
java
|
hibernate__hibernate-orm
|
hibernate-testing/src/main/java/org/hibernate/testing/orm/junit/LoggingInspections.java
|
{
"start": 1030,
"end": 1600
}
|
interface ____ {
/**
* The message-key to watch for. The message-key is the combination of
* {@link org.jboss.logging.annotations.MessageLogger#projectCode()}
* and {@link org.jboss.logging.annotations.Message#id()} used by
* JBoss Logging to prefix each messaged log event
*/
String messageKey();
/**
* Whether to reset the inspection {@linkplain BeforeEachCallback before each test} method.
*/
boolean resetBeforeEach() default true;
/**
* Descriptor of the log messages to watch for
*/
Logger[] loggers() default {};
}
}
|
Message
|
java
|
quarkusio__quarkus
|
extensions/tls-registry/deployment/src/test/java/io/quarkus/tls/NamedP12TrustStoreTest.java
|
{
"start": 845,
"end": 2217
}
|
class ____ {
private static final String configuration = """
quarkus.tls.http.trust-store.p12.path=target/certs/test-formats-truststore.p12
quarkus.tls.http.trust-store.p12.password=password
""";
@RegisterExtension
static final QuarkusUnitTest config = new QuarkusUnitTest().setArchiveProducer(
() -> ShrinkWrap.create(JavaArchive.class)
.add(new StringAsset(configuration), "application.properties"));
@Inject
TlsConfigurationRegistry certificates;
@Test
void test() throws KeyStoreException, CertificateParsingException {
TlsConfiguration def = certificates.getDefault().orElseThrow();
TlsConfiguration named = certificates.get("http").orElseThrow();
assertThat(def.getTrustStoreOptions()).isNull();
assertThat(def.getTrustStore()).isNull();
assertThat(named.getTrustStoreOptions()).isNotNull();
assertThat(named.getTrustStore()).isNotNull();
X509Certificate certificate = (X509Certificate) named.getTrustStore().getCertificate("test-formats");
assertThat(certificate).isNotNull();
assertThat(certificate.getSubjectAlternativeNames()).anySatisfy(l -> {
assertThat(l.get(0)).isEqualTo(2);
assertThat(l.get(1)).isEqualTo("localhost");
});
}
}
|
NamedP12TrustStoreTest
|
java
|
ReactiveX__RxJava
|
src/test/java/io/reactivex/rxjava3/tck/SingleFlatMapFlowableTckTest.java
|
{
"start": 813,
"end": 1339
}
|
class ____ extends BaseTck<Integer> {
@Override
public Publisher<Integer> createPublisher(final long elements) {
return
Single.just(1).hide().flatMapPublisher(new Function<Integer, Publisher<Integer>>() {
@Override
public Publisher<Integer> apply(Integer v)
throws Exception {
return Flowable.range(0, (int)elements);
}
})
;
}
}
|
SingleFlatMapFlowableTckTest
|
java
|
apache__camel
|
core/camel-api/src/main/java/org/apache/camel/spi/HasGroup.java
|
{
"start": 857,
"end": 1008
}
|
interface ____ an object which belongs to a group. Group is useful for group-related
 * operations such as clustering, JMX style API
*/
public
|
for
|
java
|
apache__dubbo
|
dubbo-rpc/dubbo-rpc-triple/src/main/java/org/apache/dubbo/rpc/protocol/tri/rest/mapping/meta/CorsMeta.java
|
{
"start": 1504,
"end": 7219
}
|
class ____ {
private final String[] allowedOrigins;
private final Pattern[] allowedOriginsPatterns;
private final String[] allowedMethods;
private final String[] allowedHeaders;
private final String[] exposedHeaders;
private final Boolean allowCredentials;
private final Long maxAge;
private CorsMeta(
String[] allowedOrigins,
Pattern[] allowedOriginsPatterns,
String[] allowedMethods,
String[] allowedHeaders,
String[] exposedHeaders,
Boolean allowCredentials,
Long maxAge) {
this.allowedOrigins = allowedOrigins;
this.allowedOriginsPatterns = allowedOriginsPatterns;
this.allowedMethods = allowedMethods;
this.allowedHeaders = allowedHeaders;
this.exposedHeaders = exposedHeaders;
this.allowCredentials = allowCredentials;
this.maxAge = maxAge;
}
public static Builder builder() {
return new Builder();
}
public static CorsMeta combine(CorsMeta source, CorsMeta other) {
return source == null || source.isEmpty()
? other == null || other.isEmpty() ? null : other.applyDefault()
: source.combine(other).applyDefault();
}
public String[] getAllowedOrigins() {
return allowedOrigins;
}
public Pattern[] getAllowedOriginsPatterns() {
return allowedOriginsPatterns;
}
public String[] getAllowedMethods() {
return allowedMethods;
}
public String[] getAllowedHeaders() {
return allowedHeaders;
}
public String[] getExposedHeaders() {
return exposedHeaders;
}
public Boolean getAllowCredentials() {
return allowCredentials;
}
public Long getMaxAge() {
return maxAge;
}
public boolean isEmpty() {
return allowedOrigins.length == 0
&& allowedMethods.length == 0
&& allowedHeaders.length == 0
&& exposedHeaders.length == 0
&& allowCredentials == null
&& maxAge == null;
}
public CorsMeta applyDefault() {
String[] allowedOriginArray = null;
Pattern[] allowedOriginPatternArray = null;
if (allowedOrigins.length == 0) {
allowedOriginArray = new String[] {ANY_VALUE};
allowedOriginPatternArray = new Pattern[] {null};
}
String[] allowedMethodArray = null;
if (allowedMethods.length == 0) {
allowedMethodArray =
new String[] {HttpMethods.GET.name(), HttpMethods.HEAD.name(), HttpMethods.POST.name()};
}
String[] allowedHeaderArray = null;
if (allowedHeaders.length == 0) {
allowedHeaderArray = new String[] {ANY_VALUE};
}
Long maxAgeValue = null;
if (maxAge == null) {
maxAgeValue = 1800L;
}
if (allowedOriginArray == null
&& allowedMethodArray == null
&& allowedHeaderArray == null
&& maxAgeValue == null) {
return this;
}
return new CorsMeta(
allowedOriginArray == null ? allowedOrigins : allowedOriginArray,
allowedOriginPatternArray == null ? allowedOriginsPatterns : allowedOriginPatternArray,
allowedMethodArray == null ? allowedMethods : allowedMethodArray,
allowedHeaderArray == null ? allowedHeaders : allowedHeaderArray,
exposedHeaders,
allowCredentials,
maxAgeValue);
}
public CorsMeta combine(CorsMeta other) {
if (other == null || other.isEmpty()) {
return this;
}
return new CorsMeta(
combine(allowedOrigins, other.allowedOrigins),
merge(allowedOriginsPatterns, other.allowedOriginsPatterns).toArray(new Pattern[0]),
combine(allowedMethods, other.allowedMethods),
combine(allowedHeaders, other.allowedHeaders),
combine(exposedHeaders, other.exposedHeaders),
other.allowCredentials == null ? allowCredentials : other.allowCredentials,
other.maxAge == null ? maxAge : other.maxAge);
}
/**
* Merge two arrays of CORS config values, the other array having higher priority.
*/
private static String[] combine(String[] source, String[] other) {
if (other.length == 0) {
return source;
}
if (source.length == 0 || source[0].equals(ANY_VALUE) || other[0].equals(ANY_VALUE)) {
return other;
}
return merge(source, other).toArray(EMPTY_STRING_ARRAY);
}
private static <T> Set<T> merge(T[] source, T[] other) {
int size = source.length + other.length;
if (size == 0) {
return Collections.emptySet();
}
Set<T> merged = CollectionUtils.newLinkedHashSet(size);
Collections.addAll(merged, source);
Collections.addAll(merged, other);
return merged;
}
@Override
public String toString() {
return "CorsMeta{"
+ "allowedOrigins=" + Arrays.toString(allowedOrigins)
+ ", allowedOriginsPatterns=" + Arrays.toString(allowedOriginsPatterns)
+ ", allowedMethods=" + Arrays.toString(allowedMethods)
+ ", allowedHeaders=" + Arrays.toString(allowedHeaders)
+ ", exposedHeaders=" + Arrays.toString(exposedHeaders)
+ ", allowCredentials=" + allowCredentials
+ ", maxAge=" + maxAge
+ '}';
}
public static final
|
CorsMeta
|
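The combine(String[], String[]) rule in the CorsMeta record above is easy to misread, so here is a self-contained restatement with a tiny driver. The precedence: the higher-priority "other" array wins outright when either side is "*" or the source is empty; otherwise values are merged in order without duplicates.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.Set;

final class CorsCombineDemo {
    static final String ANY_VALUE = "*";

    static String[] combine(String[] source, String[] other) {
        if (other.length == 0) {
            return source;
        }
        if (source.length == 0 || source[0].equals(ANY_VALUE) || other[0].equals(ANY_VALUE)) {
            return other;
        }
        Set<String> merged = new LinkedHashSet<>();
        merged.addAll(Arrays.asList(source));
        merged.addAll(Arrays.asList(other));
        return merged.toArray(new String[0]);
    }

    public static void main(String[] args) {
        // "*" on either side short-circuits to the higher-priority array.
        System.out.println(Arrays.toString(
                combine(new String[] {"*"}, new String[] {"https://a.example"})));
        // Otherwise values are merged, preserving order and dropping duplicates.
        System.out.println(Arrays.toString(
                combine(new String[] {"GET"}, new String[] {"GET", "POST"})));
    }
}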
java
|
google__guava
|
android/guava-testlib/src/com/google/common/collect/testing/SortedMapInterfaceTest.java
|
{
"start": 966,
"end": 1098
}
|
class ____
* conformance of concrete {@link SortedMap} subclasses to that contract.
*
* @author Jared Levy
*/
// TODO: Use this
|
test
|
java
|
quarkusio__quarkus
|
independent-projects/arc/tests/src/test/java/io/quarkus/arc/test/invoker/lookup/LookupUnsatisfiedTest.java
|
{
"start": 566,
"end": 1422
}
|
class ____ {
@RegisterExtension
public ArcTestContainer container = ArcTestContainer.builder()
.beanClasses(MyService.class)
.beanRegistrars(new InvokerHelperRegistrar(MyService.class, (bean, factory, invokers) -> {
MethodInfo method = bean.getImplClazz().firstMethod("hello");
invokers.put(method.name(), factory.createInvoker(bean, method)
.withArgumentLookup(0)
.build());
}))
.shouldFail()
.build();
@Test
public void trigger() {
Throwable error = container.getFailure();
assertNotNull(error);
assertInstanceOf(DeploymentException.class, error);
assertTrue(error.getMessage().contains("Unsatisfied dependency"));
}
@Singleton
static
|
LookupUnsatisfiedTest
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/spatial/SpatialDisjointTests.java
|
{
"start": 1001,
"end": 2380
}
|
class ____ extends SpatialRelatesFunctionTestCase {
public SpatialDisjointTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
this.testCase = testCaseSupplier.get();
}
@ParametersFactory
public static Iterable<Object[]> parameters() {
List<TestCaseSupplier> suppliers = new ArrayList<>();
SpatialRelatesFunctionTestCase.addSpatialGridCombinations(suppliers, GEO_POINT);
DataType[] geoDataTypes = { DataType.GEO_POINT, DataType.GEO_SHAPE };
SpatialRelatesFunctionTestCase.addSpatialCombinations(suppliers, geoDataTypes);
DataType[] cartesianDataTypes = { DataType.CARTESIAN_POINT, DataType.CARTESIAN_SHAPE };
SpatialRelatesFunctionTestCase.addSpatialCombinations(suppliers, cartesianDataTypes);
return parameterSuppliersFromTypedData(
errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers), SpatialDisjointTests::typeErrorMessage)
);
}
@Override
protected Expression build(Source source, List<Expression> args) {
return new SpatialDisjoint(source, args.get(0), args.get(1));
}
protected static String typeErrorMessage(boolean includeOrdinal, List<Set<DataType>> validPerPosition, List<DataType> types) {
return typeErrorMessage(includeOrdinal, validPerPosition, types, false, true);
}
}
|
SpatialDisjointTests
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/SurrogateExpression.java
|
{
"start": 987,
"end": 1164
}
|
interface ____ {
/**
     * Returns the expression to replace this one with, or {@code null} if this
     * cannot be replaced.
*/
Expression surrogate();
}
|
SurrogateExpression
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/QuerySettings.java
|
{
"start": 763,
"end": 5577
}
|
class ____ {
public static final QuerySettingDef<String> PROJECT_ROUTING = new QuerySettingDef<>(
"project_routing",
DataType.KEYWORD,
true,
false,
true,
"A project routing expression, "
+ "used to define which projects to route the query to. "
+ "Only supported if Cross-Project Search is enabled.",
// TODO enable this when CPS is ready and we move this to tech preview
// (value, ctx) -> ctx.crossProjectEnabled() ? null : "not enabled",
(value) -> Foldables.stringLiteralValueOf(value, "Unexpected value"),
null
);
public static final QuerySettingDef<ZoneId> TIME_ZONE = new QuerySettingDef<>(
"time_zone",
DataType.KEYWORD,
false,
true,
true,
"The default timezone to be used in the query, by the functions and commands that require it. Defaults to UTC",
(value) -> {
String timeZone = Foldables.stringLiteralValueOf(value, "Unexpected value");
try {
return ZoneId.of(timeZone);
} catch (Exception exc) {
throw new IllegalArgumentException("Invalid time zone [" + timeZone + "]");
}
},
ZoneOffset.UTC
);
public static final Map<String, QuerySettingDef<?>> SETTINGS_BY_NAME = Stream.of(PROJECT_ROUTING, TIME_ZONE)
.collect(Collectors.toMap(QuerySettingDef::name, Function.identity()));
public static void validate(EsqlStatement statement, SettingsValidationContext ctx) {
for (QuerySetting setting : statement.settings()) {
QuerySettingDef<?> def = SETTINGS_BY_NAME.get(setting.name());
if (def == null) {
throw new ParsingException(setting.source(), "Unknown setting [" + setting.name() + "]");
}
if (def.snapshotOnly && ctx.isSnapshot() == false) {
throw new ParsingException(setting.source(), "Setting [" + setting.name() + "] is only available in snapshot builds");
}
if (setting.value().dataType() != def.type()) {
throw new ParsingException(setting.source(), "Setting [" + setting.name() + "] must be of type " + def.type());
}
Literal literal;
if (setting.value() instanceof Literal l) {
literal = l;
} else {
throw new ParsingException(setting.source(), "Setting [" + setting.name() + "] must have a literal value");
}
String error = def.validator().validate(literal, ctx);
if (error != null) {
throw new ParsingException("Error validating setting [" + setting.name() + "]: " + error);
}
}
}
/**
* Definition of a query setting.
*
* @param name The name to be used when setting it in the query. E.g. {@code SET name=value}
* @param type The allowed datatype of the setting.
* @param serverlessOnly
* @param preview
* @param snapshotOnly
* @param description The user-facing description of the setting.
* @param validator A validation function to check the setting value.
* Defaults to calling the {@link #parser} and returning the error message of any exception it throws.
* @param parser A function to parse the setting value into the final object.
* @param defaultValue A default value to be used when the setting is not set.
* @param <T> The type of the setting value.
*/
public record QuerySettingDef<T>(
String name,
DataType type,
boolean serverlessOnly,
boolean preview,
boolean snapshotOnly,
String description,
Validator validator,
Parser<T> parser,
T defaultValue
) {
/**
* Constructor with a default validator that delegates to the parser.
*/
public QuerySettingDef(
String name,
DataType type,
boolean serverlessOnly,
boolean preview,
boolean snapshotOnly,
String description,
Parser<T> parser,
T defaultValue
) {
this(name, type, serverlessOnly, preview, snapshotOnly, description, (value, rcs) -> {
try {
parser.parse(value);
return null;
} catch (Exception exc) {
return exc.getMessage();
}
}, parser, defaultValue);
}
public T parse(@Nullable Literal value) {
if (value == null) {
return defaultValue;
}
return parser.parse(value);
}
@FunctionalInterface
public
|
QuerySettings
|
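The QuerySettingDef record above derives its default validator from the parser: validation is just a parse attempt whose exception message becomes the error. A self-contained sketch of that pattern, with hypothetical names:

import java.util.function.Function;

record Setting<T>(String name, Function<String, T> parser) {
    // The validator simply attempts the parse and reports the failure message;
    // null means "valid", matching the convention in the snippet above.
    String validate(String raw) {
        try {
            parser.apply(raw);
            return null;
        } catch (Exception e) {
            return e.getMessage();
        }
    }

    T parse(String raw, T defaultValue) {
        return raw == null ? defaultValue : parser.apply(raw);
    }
}

// e.g. new Setting<>("time_zone", java.time.ZoneId::of).validate("Not/AZone")
// returns the ZoneId parse error, while validate("UTC") returns null.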
java
|
elastic__elasticsearch
|
x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/email/Attachment.java
|
{
"start": 7117,
"end": 8469
}
|
class ____ extends Bytes {
protected XContent(String id, ToXContent content, XContentType type) {
this(id, id, content, type);
}
protected XContent(String id, String name, ToXContent content, XContentType type) {
super(id, name, bytes(name, content, type), mimeType(type), false, Collections.emptySet());
}
static String mimeType(XContentType type) {
return switch (type) {
case JSON -> "application/json";
case YAML -> "application/yaml";
case SMILE -> "application/smile";
case CBOR -> "application/cbor";
default -> throw new IllegalArgumentException("unsupported xcontent attachment type [" + type.name() + "]");
};
}
static byte[] bytes(String name, ToXContent content, XContentType type) {
try {
XContentBuilder builder = XContentBuilder.builder(type.xContent()).prettyPrint();
content.toXContent(builder, ToXContent.EMPTY_PARAMS);
return BytesReference.toBytes(BytesReference.bytes(builder));
} catch (IOException ioe) {
throw new ElasticsearchException("could not create an xcontent attachment [" + name + "]", ioe);
}
}
public static
|
XContent
|
java
|
apache__kafka
|
metadata/src/main/java/org/apache/kafka/controller/QuorumController.java
|
{
"start": 42111,
"end": 51659
}
|
class ____ implements RaftClient.Listener<ApiMessageAndVersion> {
@Override
public void handleCommit(BatchReader<ApiMessageAndVersion> reader) {
appendRaftEvent("handleCommit[baseOffset=" + reader.baseOffset() + "]", () -> {
try {
boolean isActive = isActiveController();
while (reader.hasNext()) {
Batch<ApiMessageAndVersion> batch = reader.next();
long offset = batch.lastOffset();
int epoch = batch.epoch();
List<ApiMessageAndVersion> messages = batch.records();
if (messages.isEmpty()) {
log.debug("Skipping handling commit for batch with no data records with offset {} and epoch {}.", offset, epoch);
offsetControl.handleCommitBatchMetrics(batch);
} else if (isActive) {
// If the controller is active, the records were already replayed,
// so we don't need to do it here.
log.debug("Completing purgatory items up to offset {} and epoch {}.", offset, epoch);
// Advance the committed and stable offsets then complete any pending purgatory
// items that were waiting for these offsets.
offsetControl.handleCommitBatch(batch);
deferredEventQueue.completeUpTo(offsetControl.lastStableOffset());
} else {
// If the controller is a standby, replay the records that were
// created by the active controller.
if (log.isDebugEnabled()) {
log.debug("Replaying commits from the active node up to " +
"offset {} and epoch {}.", offset, epoch);
}
int recordIndex = 0;
for (ApiMessageAndVersion message : messages) {
long recordOffset = batch.baseOffset() + recordIndex;
try {
replay(message.message(), Optional.empty(), recordOffset);
} catch (Throwable e) {
String failureMessage = String.format("Unable to apply %s " +
"record at offset %d on standby controller, from the " +
"batch with baseOffset %d",
message.message().getClass().getSimpleName(),
recordOffset, batch.baseOffset());
throw fatalFaultHandler.handleFault(failureMessage, e);
}
recordIndex++;
}
offsetControl.handleCommitBatch(batch);
}
}
} finally {
reader.close();
}
});
}
@Override
public void handleLoadSnapshot(SnapshotReader<ApiMessageAndVersion> reader) {
appendRaftEvent(String.format("handleLoadSnapshot[snapshotId=%s]", reader.snapshotId()), () -> {
try {
String snapshotName = Snapshots.filenameFromSnapshotId(reader.snapshotId());
if (isActiveController()) {
throw fatalFaultHandler.handleFault("Asked to load snapshot " + snapshotName +
", but we are the active controller at epoch " + curClaimEpoch);
}
offsetControl.beginLoadSnapshot(reader.snapshotId());
while (reader.hasNext()) {
Batch<ApiMessageAndVersion> batch = reader.next();
long offset = batch.lastOffset();
List<ApiMessageAndVersion> messages = batch.records();
log.debug("Replaying snapshot {} batch with last offset of {}",
snapshotName, offset);
int i = 1;
for (ApiMessageAndVersion message : messages) {
try {
replay(message.message(), Optional.of(reader.snapshotId()),
reader.lastContainedLogOffset());
} catch (Throwable e) {
String failureMessage = String.format("Unable to apply %s record " +
"from snapshot %s on standby controller, which was %d of " +
"%d record(s) in the batch with baseOffset %d.",
message.message().getClass().getSimpleName(), reader.snapshotId(),
i, messages.size(), batch.baseOffset());
throw fatalFaultHandler.handleFault(failureMessage, e);
}
i++;
}
}
offsetControl.endLoadSnapshot(reader.lastContainedLogTimestamp());
} catch (FaultHandlerException e) {
throw e;
} catch (Throwable e) {
throw fatalFaultHandler.handleFault("Error while loading snapshot " +
reader.snapshotId(), e);
} finally {
reader.close();
}
});
}
@Override
public void handleLeaderChange(LeaderAndEpoch newLeader) {
appendRaftEvent("handleLeaderChange[" + newLeader.epoch() + "]", () -> {
final String newLeaderName = newLeader.leaderId().isPresent() ?
String.valueOf(newLeader.leaderId().getAsInt()) : "(none)";
if (newLeader.leaderId().isPresent()) {
controllerMetrics.incrementNewActiveControllers();
}
if (isActiveController()) {
if (newLeader.isLeader(nodeId)) {
log.warn("We were the leader in epoch {}, and are still the leader " +
"in the new epoch {}.", curClaimEpoch, newLeader.epoch());
curClaimEpoch = newLeader.epoch();
} else {
log.warn("Renouncing the leadership due to a metadata log event. " +
"We were the leader at epoch {}, but in the new epoch {}, " +
"the leader is {}. Reverting to last stable offset {}.",
curClaimEpoch, newLeader.epoch(), newLeaderName,
offsetControl.lastStableOffset());
renounce();
}
} else if (newLeader.isLeader(nodeId)) {
long newNextWriteOffset = raftClient.logEndOffset();
log.info("Becoming the active controller at epoch {}, next write offset {}.",
newLeader.epoch(), newNextWriteOffset);
claim(newLeader.epoch(), newNextWriteOffset);
} else {
log.info("In the new epoch {}, the leader is {}.",
newLeader.epoch(), newLeaderName);
}
});
}
@Override
public void beginShutdown() {
queue.beginShutdown("QuorumMetaLogListener");
}
private void appendRaftEvent(String name, Runnable runnable) {
appendControlEvent(name, () -> {
if (this != metaLogListener) {
log.debug("Ignoring {} raft event from an old registration", name);
} else {
runnable.run();
}
});
}
}
private boolean isActiveController() {
return isActiveController(curClaimEpoch);
}
private static boolean isActiveController(int claimEpoch) {
return claimEpoch != -1;
}
private void claim(int epoch, long newNextWriteOffset) {
try {
if (curClaimEpoch != -1) {
throw new RuntimeException("Cannot claim leadership because we are already the " +
"active controller.");
}
curClaimEpoch = epoch;
offsetControl.activate(newNextWriteOffset);
clusterControl.activate();
// Prepend the activate event. It is important that this event go at the beginning
// of the queue rather than the end (hence prepend rather than append). It's also
// important not to use prepend for anything else, to preserve the ordering here.
ControllerWriteEvent<Void> activationEvent = new ControllerWriteEvent<>(
"completeActivation[" + epoch + "]",
new CompleteActivationEvent(),
EnumSet.of(DOES_NOT_UPDATE_QUEUE_TIME)
);
queue.prepend(activationEvent);
} catch (Throwable e) {
fatalFaultHandler.handleFault("exception while claiming leadership", e);
}
}
|
QuorumMetaLogListener
|
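appendRaftEvent in the QuorumController record above guards against events delivered by a superseded listener registration. A minimal sketch of that guard, with hypothetical names:

final class ListenerGuardDemo {
    private Object currentListener;

    void register(Object listener) {
        currentListener = listener;
    }

    void dispatch(Object sourceListener, String name, Runnable event) {
        if (sourceListener != currentListener) {
            // A superseded registration may still deliver queued events;
            // drop them instead of applying stale state transitions.
            System.out.println("Ignoring " + name + " from an old registration");
        } else {
            event.run();
        }
    }
}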
java
|
google__guava
|
android/guava/src/com/google/common/cache/LongAddables.java
|
{
"start": 1741,
"end": 2057
}
|
class ____ extends AtomicLong implements LongAddable {
@Override
public void increment() {
getAndIncrement();
}
@Override
public void add(long x) {
getAndAdd(x);
}
@Override
public long sum() {
return get();
}
}
private LongAddables() {}
}
|
PureJavaLongAddable
|
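PureJavaLongAddable above is Guava's fallback when an optimized striped counter is unavailable. A hedged sketch of the probe-and-fall-back selection follows; note that on modern JDKs java.util.concurrent.atomic.LongAdder always loads, so the probe only matters on the older Android runtimes Guava targets.

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.LongAdder;
import java.util.function.Supplier;

interface Counter {
    void increment();
    long sum();
}

final class LongAdderCounter implements Counter {
    private final LongAdder adder = new LongAdder();
    @Override public void increment() { adder.increment(); }
    @Override public long sum() { return adder.sum(); }
}

final class AtomicLongCounter implements Counter {
    private final AtomicLong value = new AtomicLong();
    @Override public void increment() { value.getAndIncrement(); }
    @Override public long sum() { return value.get(); }
}

final class Counters {
    static final Supplier<Counter> SUPPLIER = chooseSupplier();

    private static Supplier<Counter> chooseSupplier() {
        try {
            // Probe for the optional class; if it cannot be loaded, pick the
            // AtomicLong-backed fallback, as LongAddables does above.
            Class.forName("java.util.concurrent.atomic.LongAdder");
            return LongAdderCounter::new;
        } catch (Throwable t) {
            return AtomicLongCounter::new;
        }
    }

    private Counters() {}
}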
java
|
elastic__elasticsearch
|
test/framework/src/main/java/org/elasticsearch/ingest/TestIngestDocument.java
|
{
"start": 796,
"end": 3163
}
|
class ____ {
public static final long DEFAULT_VERSION = 12345L;
private static final String VERSION = IngestDocument.Metadata.VERSION.getFieldName();
/**
* Create an {@link IngestDocument} from the given sourceAndMetadata and ingestMetadata and a version validator that allows null
* _versions. Normally null _version is not allowed, but many tests don't care about that invariant.
*/
public static IngestDocument withDefaultVersion(Map<String, Object> sourceAndMetadata, Map<String, Object> ingestMetadata) {
if (sourceAndMetadata.containsKey(VERSION) == false) {
sourceAndMetadata = new HashMap<>(sourceAndMetadata);
sourceAndMetadata.put(VERSION, DEFAULT_VERSION);
}
return new IngestDocument(sourceAndMetadata, ingestMetadata);
}
/**
* Create an {@link IngestDocument} with {@link #DEFAULT_VERSION} as the _version metadata, if _version is not already present.
*/
public static IngestDocument withDefaultVersion(Map<String, Object> sourceAndMetadata) {
return withDefaultVersion(sourceAndMetadata, new HashMap<>());
}
/**
* Create an empty ingest document for testing.
* <p>
* Adds the required {@code "_version"} metadata key with value {@link #DEFAULT_VERSION}.
*/
public static IngestDocument emptyIngestDocument() {
Map<String, Object> sourceAndMetadata = new HashMap<>();
sourceAndMetadata.put(VERSION, DEFAULT_VERSION);
return new IngestDocument(sourceAndMetadata, new HashMap<>());
}
public static Tuple<String, Object> randomMetadata() {
IngestDocument.Metadata metadata = ESTestCase.randomFrom(IngestDocument.Metadata.values());
return new Tuple<>(metadata.getFieldName(), switch (metadata) {
case VERSION, IF_SEQ_NO, IF_PRIMARY_TERM -> ESTestCase.randomIntBetween(0, 124);
case VERSION_TYPE -> VersionType.toString(ESTestCase.randomFrom(VersionType.values()));
case DYNAMIC_TEMPLATES -> Map.of(ESTestCase.randomAlphaOfLengthBetween(5, 10), ESTestCase.randomAlphaOfLengthBetween(5, 10));
default -> ESTestCase.randomAlphaOfLengthBetween(5, 10);
});
}
public static long randomVersion() {
return ESTestCase.randomLongBetween(Versions.MATCH_DELETED, Long.MAX_VALUE);
}
}
|
TestIngestDocument
|
java
|
apache__flink
|
flink-table/flink-table-common/src/main/java/org/apache/flink/table/functions/AsyncScalarFunction.java
|
{
"start": 3679,
"end": 3959
}
|
class ____ extends AsyncScalarFunction {
* public void eval(@DataTypeHint("DECIMAL(10, 4)") CompletableFuture<BigDecimal> future,
* Long... values) {
* // ...
* }
* }
* }</pre>
*
* <p>For storing a user-defined function in a catalog, the
|
SumFunction
|
java
|
apache__flink
|
flink-core-api/src/main/java/org/apache/flink/api/common/typeinfo/TypeDescriptors.java
|
{
"start": 1126,
"end": 6903
}
|
class ____ implements Serializable {
@SuppressWarnings("unchecked")
public static <T> TypeDescriptor<T> value(TypeDescriptor<T> typeDescriptor)
throws ReflectiveOperationException {
return (TypeDescriptor<T>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.ValueTypeDescriptorImpl",
typeDescriptor);
}
@SuppressWarnings("unchecked")
public static <K, V> TypeDescriptor<Map<K, V>> map(
TypeDescriptor<K> keyTypeDescriptor, TypeDescriptor<V> valueTypeDescriptor)
throws ReflectiveOperationException {
return (TypeDescriptor<Map<K, V>>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.MapTypeDescriptorImpl",
keyTypeDescriptor,
valueTypeDescriptor);
}
@SuppressWarnings("unchecked")
public static <T> TypeDescriptor<List<T>> list(TypeDescriptor<T> elementTypeDescriptor)
throws ReflectiveOperationException {
return (TypeDescriptor<List<T>>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.ListTypeDescriptorImpl",
elementTypeDescriptor);
}
// BasicTypeInfo type descriptors
public static final TypeDescriptor<String> STRING;
public static final TypeDescriptor<Integer> INT;
public static final TypeDescriptor<Boolean> BOOLEAN;
public static final TypeDescriptor<Long> LONG;
public static final TypeDescriptor<Byte> BYTE;
public static final TypeDescriptor<Short> SHORT;
public static final TypeDescriptor<Double> DOUBLE;
public static final TypeDescriptor<Float> FLOAT;
public static final TypeDescriptor<Character> CHAR;
static {
try {
@SuppressWarnings("unchecked")
TypeDescriptor<String> stringTypeTemp =
(TypeDescriptor<String>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<String>) () -> String.class);
STRING = stringTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Integer> intTypeTemp =
(TypeDescriptor<Integer>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Integer>) () -> Integer.class);
INT = intTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Boolean> booleanTypeTemp =
(TypeDescriptor<Boolean>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Boolean>) () -> Boolean.class);
BOOLEAN = booleanTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Long> longTypeTemp =
(TypeDescriptor<Long>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Long>) () -> Long.class);
LONG = longTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Byte> byteTypeTemp =
(TypeDescriptor<Byte>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Byte>) () -> Byte.class);
BYTE = byteTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Short> shortTypeTemp =
(TypeDescriptor<Short>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Short>) () -> Short.class);
SHORT = shortTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Double> doubleTypeTemp =
(TypeDescriptor<Double>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Double>) () -> Double.class);
DOUBLE = doubleTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Float> floatTypeTemp =
(TypeDescriptor<Float>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Float>) () -> Float.class);
FLOAT = floatTypeTemp;
@SuppressWarnings("unchecked")
TypeDescriptor<Character> charTypeTemp =
(TypeDescriptor<Character>)
TypeUtils.getInstance(
"org.apache.flink.api.common.typeinfo.descriptor.BasicTypeDescriptorImpl",
(TypeDescriptor<Character>) () -> Character.class);
CHAR = charTypeTemp;
} catch (ReflectiveOperationException e) {
throw new RuntimeException(e);
}
}
}
|
TypeDescriptors
|
java
|
spring-projects__spring-framework
|
spring-test/src/test/java/org/springframework/test/web/servlet/client/assertj/RestTestClientResponseTests.java
|
{
"start": 1412,
"end": 3777
}
|
class ____ {
private final RestTestClient client =
RestTestClient.bindToController(HelloController.class)
.configureMessageConverters(HttpMessageConverters.Builder::registerDefaults)
.build();
@Test
void status() {
ResponseSpec spec = client.get().uri("/greeting").exchange();
assertThat(RestTestClientResponse.from(spec)).hasStatusOk().hasStatus2xxSuccessful();
}
@Test
void headers() {
RestTestClient.ResponseSpec spec = client.get().uri("/greeting").exchange();
RestTestClientResponse response = RestTestClientResponse.from(spec);
assertThat(response).hasStatusOk();
assertThat(response).headers()
.containsOnlyHeaders(HttpHeaders.CONTENT_TYPE, HttpHeaders.CONTENT_LENGTH)
.hasValue(HttpHeaders.CONTENT_TYPE, "text/plain;charset=ISO-8859-1")
.hasValue(HttpHeaders.CONTENT_LENGTH, 11);
}
@Test
void contentType() {
ResponseSpec spec = client.get().uri("/greeting").exchange();
RestTestClientResponse response = RestTestClientResponse.from(spec);
assertThat(response).hasStatusOk();
assertThat(response).contentType().isEqualTo("text/plain;charset=ISO-8859-1");
assertThat(response).hasContentTypeCompatibleWith(MediaType.TEXT_PLAIN);
}
@Test
void cookies() {
ResponseSpec spec = client.get().uri("/cookie").exchange();
RestTestClientResponse response = RestTestClientResponse.from(spec);
assertThat(response).hasStatusOk();
assertThat(response).cookies().hasValue("foo", "bar");
assertThat(response).body().isEmpty();
}
@Test
void bodyText() {
ResponseSpec spec = client.get().uri("/greeting").exchange();
RestTestClientResponse response = RestTestClientResponse.from(spec);
assertThat(response).hasStatusOk();
assertThat(response).contentType().isCompatibleWith(MediaType.TEXT_PLAIN);
assertThat(response).bodyText().isEqualTo("Hello World");
assertThat(response).hasBodyTextEqualTo("Hello World");
}
@Test
void bodyJson() {
ResponseSpec spec = client.get().uri("/message").exchange();
RestTestClientResponse response = RestTestClientResponse.from(spec);
assertThat(response).hasStatusOk();
assertThat(response).contentType().isEqualTo(MediaType.APPLICATION_JSON);
assertThat(response).bodyJson().extractingPath("$.message").asString().isEqualTo("Hello World");
}
@SuppressWarnings("unused")
@RestController
private static
|
RestTestClientResponseTests
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/aggregator/AggregateCompletionSizeExpressionAndTimeoutTest.java
|
{
"start": 1098,
"end": 1934
}
|
class ____ extends ContextTestSupport {
@Test
public void testAggregateExpressionSize() throws Exception {
MockEndpoint result = getMockEndpoint("mock:result");
result.expectedBodiesReceived("A+B", "C+D", "E");
template.sendBody("direct:start", "A,B,C,D,E");
assertMockEndpointsSatisfied();
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
from("direct:start").split(body().tokenize(",")).aggregate(constant(true), new BodyInAggregatingStrategy())
.completionSize(constant(2)).completionTimeout(1000)
.to("log:result", "mock:result");
}
};
}
}
|
AggregateCompletionSizeExpressionAndTimeoutTest
|
java
|
mockito__mockito
|
mockito-core/src/test/java/org/mockitousage/bugs/creation/MockClassWithMissingStaticDepTest.java
|
{
"start": 1644,
"end": 1880
}
|
class ____ {
static {
//noinspection ConstantValue
if (true) {
throw new NoClassDefFoundError(
"Simulate missing transitive dependency used in
|
ClassWithErrorInClassInit
|
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FloatVector.java
|
{
"start": 759,
"end": 6067
}
|
interface ____ extends Vector permits ConstantFloatVector, FloatArrayVector, FloatBigArrayVector, ConstantNullVector {
float getFloat(int position);
@Override
FloatBlock asBlock();
@Override
FloatVector filter(int... positions);
@Override
FloatBlock keepMask(BooleanVector mask);
/**
* Make a deep copy of this {@link Vector} using the provided {@link BlockFactory},
* likely copying all data.
*/
@Override
default FloatVector deepCopy(BlockFactory blockFactory) {
try (FloatBlock.Builder builder = blockFactory.newFloatBlockBuilder(getPositionCount())) {
builder.copyFrom(asBlock(), 0, getPositionCount());
builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING);
return builder.build().asVector();
}
}
@Override
ReleasableIterator<? extends FloatBlock> lookup(IntBlock positions, ByteSizeValue targetBlockSize);
/**
* Compares the given object with this vector for equality. Returns {@code true} if and only if the
* given object is a FloatVector, and both vectors are {@link #equals(FloatVector, FloatVector) equal}.
*/
@Override
boolean equals(Object obj);
/** Returns the hash code of this vector, as defined by {@link #hash(FloatVector)}. */
@Override
int hashCode();
/**
* Returns {@code true} if the given vectors are equal to each other, otherwise {@code false}.
* Two vectors are considered equal if they have the same position count, and contain the same
* values in the same order. This definition ensures that the equals method works properly
* across different implementations of the FloatVector interface.
*/
static boolean equals(FloatVector vector1, FloatVector vector2) {
final int positions = vector1.getPositionCount();
if (positions != vector2.getPositionCount()) {
return false;
}
for (int pos = 0; pos < positions; pos++) {
if (vector1.getFloat(pos) != vector2.getFloat(pos)) {
return false;
}
}
return true;
}
/**
* Generates the hash code for the given vector. The hash code is computed from the vector's values.
* This ensures that {@code vector1.equals(vector2)} implies that {@code vector1.hashCode()==vector2.hashCode()}
* for any two vectors, {@code vector1} and {@code vector2}, as required by the general contract of
* {@link Object#hashCode}.
*/
static int hash(FloatVector vector) {
final int len = vector.getPositionCount();
int result = 1;
for (int pos = 0; pos < len; pos++) {
result = 31 * result + Float.floatToIntBits(vector.getFloat(pos));
}
return result;
}
/** Deserializes a Vector from the given stream input. */
static FloatVector readFrom(BlockFactory blockFactory, StreamInput in) throws IOException {
final int positions = in.readVInt();
final byte serializationType = in.readByte();
return switch (serializationType) {
case SERIALIZE_VECTOR_VALUES -> readValues(positions, in, blockFactory);
case SERIALIZE_VECTOR_CONSTANT -> blockFactory.newConstantFloatVector(in.readFloat(), positions);
case SERIALIZE_VECTOR_ARRAY -> FloatArrayVector.readArrayVector(positions, in, blockFactory);
case SERIALIZE_VECTOR_BIG_ARRAY -> FloatBigArrayVector.readArrayVector(positions, in, blockFactory);
default -> {
assert false : "invalid vector serialization type [" + serializationType + "]";
throw new IllegalStateException("invalid vector serialization type [" + serializationType + "]");
}
};
}
/** Serializes this Vector to the given stream output. */
default void writeTo(StreamOutput out) throws IOException {
final int positions = getPositionCount();
final var version = out.getTransportVersion();
out.writeVInt(positions);
if (isConstant() && positions > 0) {
out.writeByte(SERIALIZE_VECTOR_CONSTANT);
out.writeFloat(getFloat(0));
} else if (this instanceof FloatArrayVector v) {
out.writeByte(SERIALIZE_VECTOR_ARRAY);
v.writeArrayVector(positions, out);
} else if (this instanceof FloatBigArrayVector v) {
out.writeByte(SERIALIZE_VECTOR_BIG_ARRAY);
v.writeArrayVector(positions, out);
} else {
out.writeByte(SERIALIZE_VECTOR_VALUES);
writeValues(this, positions, out);
}
}
private static FloatVector readValues(int positions, StreamInput in, BlockFactory blockFactory) throws IOException {
try (var builder = blockFactory.newFloatVectorFixedBuilder(positions)) {
for (int i = 0; i < positions; i++) {
builder.appendFloat(i, in.readFloat());
}
return builder.build();
}
}
private static void writeValues(FloatVector v, int positions, StreamOutput out) throws IOException {
for (int i = 0; i < positions; i++) {
out.writeFloat(v.getFloat(i));
}
}
/**
* A builder that grows as needed.
*/
sealed
|
FloatVector
|
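The equals/hash contract spelled out in the FloatVector record above can be checked in isolation: equality is positional, and the hash folds Float.floatToIntBits per value, so equal vectors hash equally. A self-contained illustration:

final class FloatVectorContractDemo {
    static boolean equals(float[] v1, float[] v2) {
        if (v1.length != v2.length) {
            return false;
        }
        for (int pos = 0; pos < v1.length; pos++) {
            if (v1[pos] != v2[pos]) {
                return false;
            }
        }
        return true;
    }

    static int hash(float[] v) {
        int result = 1;
        for (float f : v) {
            result = 31 * result + Float.floatToIntBits(f);
        }
        return result;
    }

    public static void main(String[] args) {
        float[] a = {1f, 2f};
        float[] b = {1f, 2f};
        // Equal vectors must produce equal hashes, per Object#hashCode's contract.
        System.out.println(equals(a, b) && hash(a) == hash(b)); // true
    }
}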
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/test/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/reader/DummyDocumentStoreReader.java
|
{
"start": 1820,
"end": 4651
}
|
class ____<TimelineDoc extends TimelineDocument>
implements DocumentStoreReader<TimelineDoc> {
private final TimelineEntityDocument entityDoc;
private final List<TimelineEntityDocument> entityDocs;
private final FlowRunDocument flowRunDoc;
private final FlowActivityDocument flowActivityDoc;
public DummyDocumentStoreReader() {
try {
entityDoc = DocumentStoreTestUtils.bakeTimelineEntityDoc();
entityDocs = DocumentStoreTestUtils.bakeYarnAppTimelineEntities();
flowRunDoc = DocumentStoreTestUtils.bakeFlowRunDoc();
flowActivityDoc = DocumentStoreTestUtils.bakeFlowActivityDoc();
} catch (IOException e) {
throw new RuntimeException("Unable to create " +
"DummyDocumentStoreReader : ", e);
}
}
@Override
@SuppressWarnings("unchecked")
public TimelineDoc readDocument(String collectionName, TimelineReaderContext
context, Class<TimelineDoc> docClass) {
switch (TimelineEntityType.valueOf(context.getEntityType())) {
case YARN_FLOW_ACTIVITY:
return (TimelineDoc) flowActivityDoc;
case YARN_FLOW_RUN:
return (TimelineDoc) flowRunDoc;
default:
return (TimelineDoc) entityDoc;
}
}
@Override
@SuppressWarnings("unchecked")
public List<TimelineDoc> readDocumentList(String collectionName,
TimelineReaderContext context, Class<TimelineDoc> docClass, long size) {
switch (TimelineEntityType.valueOf(context.getEntityType())) {
case YARN_FLOW_ACTIVITY:
List<FlowActivityDocument> flowActivityDocs = new ArrayList<>();
flowActivityDocs.add(flowActivityDoc);
if (size > flowActivityDocs.size()) {
size = flowActivityDocs.size();
}
return (List<TimelineDoc>) flowActivityDocs.subList(0, (int) size);
case YARN_FLOW_RUN:
List<FlowRunDocument> flowRunDocs = new ArrayList<>();
flowRunDocs.add(flowRunDoc);
if (size > flowRunDocs.size()) {
size = flowRunDocs.size();
}
return (List<TimelineDoc>) flowRunDocs.subList(0, (int) size);
case YARN_APPLICATION:
List<TimelineEntityDocument> applicationEntities = new ArrayList<>();
applicationEntities.add(entityDoc);
if (size > applicationEntities.size()) {
size = applicationEntities.size();
}
return (List<TimelineDoc>) applicationEntities.subList(0, (int) size);
default:
if (size > entityDocs.size() || size == -1) {
size = entityDocs.size();
}
return (List<TimelineDoc>) entityDocs.subList(0, (int) size);
}
}
@Override
public Set<String> fetchEntityTypes(String collectionName,
TimelineReaderContext context) {
return entityDocs.stream().map(TimelineEntityDocument::getType)
.collect(Collectors.toSet());
}
@Override
public void close() {
}
}
|
DummyDocumentStoreReader
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/main/java/org/hibernate/dialect/aggregate/CockroachDBAggregateSupport.java
|
{
"start": 9836,
"end": 11268
}
|
class ____ implements JsonWriteExpression {
private final SelectableMapping selectableMapping;
private final String customWriteExpressionStart;
private final String customWriteExpressionEnd;
BasicJsonWriteExpression(SelectableMapping selectableMapping, String customWriteExpression) {
this.selectableMapping = selectableMapping;
if ( customWriteExpression.equals( "?" ) ) {
this.customWriteExpressionStart = "";
this.customWriteExpressionEnd = "";
}
else {
final String[] parts = StringHelper.split( "?", customWriteExpression );
assert parts.length == 2;
this.customWriteExpressionStart = parts[0];
this.customWriteExpressionEnd = parts[1];
}
}
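// Worked example (illustrative): a custom write expression such as
// "cast(? as json)" splits on '?' into start "cast(" and end " as json)",
// so append(...) below renders the value as cast(<value> as json).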
@Override
public void append(
SqlAppender sb,
String path,
SqlAstTranslator<?> translator,
AggregateColumnWriteExpression expression) {
sb.append( '\'' );
sb.append( selectableMapping.getSelectableName() );
sb.append( "'," );
sb.append( customWriteExpressionStart );
// We use NO_UNTYPED here so that expressions which require type inference are cast explicitly,
// since we don't know what the custom write expression looks like where this is embedded,
// so we have to be pessimistic and avoid ambiguities
translator.render( expression.getValueExpression( selectableMapping ), SqlAstNodeRenderingMode.NO_UNTYPED );
sb.append( customWriteExpressionEnd );
}
}
}
|
BasicJsonWriteExpression
|
java
|
google__truth
|
extensions/proto/src/main/java/com/google/common/truth/extensions/proto/MapWithProtoValuesFluentAssertion.java
|
{
"start": 1936,
"end": 23558
}
|
interface ____<M extends Message> {
/**
* Specifies that the 'has' bit of individual fields should be ignored when comparing for
* equality.
*
* <p>For version 2 Protocol Buffers, this setting determines whether two protos with the same
* value for a field compare equal if one explicitly sets the value, and the other merely
* implicitly uses the schema-defined default. This setting also determines whether unknown fields
* should be considered in the comparison. By {@code ignoringFieldAbsence()}, unknown fields are
* ignored, and value-equal fields as specified above are considered equal.
*
* <p>For version 3 Protocol Buffers, this setting does not affect primitive fields, because their
* default value is indistinguishable from unset.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceForValues();
/**
* Specifies that the 'has' bit of these explicitly specified top-level field numbers should be
* ignored when comparing for equality. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if they are to be ignored as well.
*
* <p>Use {@link #ignoringFieldAbsenceForValues()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsenceForValues() for details
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldsForValues(
int firstFieldNumber, int... rest);
/**
* Specifies that the 'has' bit of these explicitly specified top-level field numbers should be
* ignored when comparing for equality. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if they are to be ignored as well.
*
* <p>Use {@link #ignoringFieldAbsenceForValues()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsenceForValues() for details
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldsForValues(
Iterable<Integer> fieldNumbers);
/**
* Specifies that the 'has' bit of these explicitly specified field descriptors should be ignored
* when comparing for equality. Sub-fields must be specified explicitly if they are to be ignored
* as well.
*
* <p>Use {@link #ignoringFieldAbsenceForValues()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsenceForValues() for details
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that the 'has' bit of these explicitly specified field descriptors should be ignored
* when comparing for equality. Sub-fields must be specified explicitly if they are to be ignored
* as well.
*
* <p>Use {@link #ignoringFieldAbsenceForValues()} instead to ignore the 'has' bit for all fields.
*
* @see #ignoringFieldAbsenceForValues() for details
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldAbsenceOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Specifies that the ordering of repeated fields, at all levels, should be ignored when comparing
* for equality.
*
* <p>This setting applies to all repeated fields recursively, but it does not ignore structure.
* For example, with {@link #ignoringRepeatedFieldOrderForValues()}, given a repeated {@code int32}
* field {@code bar} set inside a repeated message field {@code foo}, the following protos will
* all compare equal:
*
* <pre>{@code
* message1: {
* foo: {
* bar: 1
* bar: 2
* }
* foo: {
* bar: 3
* bar: 4
* }
* }
*
* message2: {
* foo: {
* bar: 2
* bar: 1
* }
* foo: {
* bar: 4
* bar: 3
* }
* }
*
* message3: {
* foo: {
* bar: 4
* bar: 3
* }
* foo: {
* bar: 2
* bar: 1
* }
* }
* }</pre>
*
* <p>However, the following message will compare equal to none of these:
*
* <pre>{@code
* message4: {
* foo: {
* bar: 1
* bar: 3
* }
* foo: {
* bar: 2
* bar: 4
* }
* }
* }</pre>
*
* <p>This setting does not apply to map fields, for which field order is always ignored. The
* serialization order of map fields is undefined, and it may change from runtime to runtime.
*/
MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderForValues();
/**
* Specifies that the ordering of repeated fields for these explicitly specified top-level field
* numbers should be ignored when comparing for equality. Sub-fields must be specified explicitly
* (via {@link FieldDescriptor}) if their orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrderForValues()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrderForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldsForValues(
int firstFieldNumber, int... rest);
/**
* Specifies that the ordering of repeated fields for these explicitly specified top-level field
* numbers should be ignored when comparing for equality. Sub-fields must be specified explicitly
* (via {@link FieldDescriptor}) if their orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrderForValues()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrderForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldsForValues(
Iterable<Integer> fieldNumbers);
/**
* Specifies that the ordering of repeated fields for these explicitly specified field descriptors
* should be ignored when comparing for equality. Sub-fields must be specified explicitly if their
* orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrderForValues()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrderForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that the ordering of repeated fields for these explicitly specified field descriptors
* should be ignored when comparing for equality. Sub-fields must be specified explicitly if their
* orders are to be ignored as well.
*
* <p>Use {@link #ignoringRepeatedFieldOrderForValues()} instead to ignore order for all fields.
*
* @see #ignoringRepeatedFieldOrderForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringRepeatedFieldOrderOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Specifies that, for all repeated and map fields, any elements in the 'actual' proto which are
* not found in the 'expected' proto are ignored, with the exception of fields in the expected
* proto which are empty. To ignore empty repeated fields as well, use {@link
* #comparingExpectedFieldsOnlyForValues}.
*
* <p>This rule is applied independently from {@link #ignoringRepeatedFieldOrderForValues}. If
* ignoring repeated field order AND extra repeated field elements, all that is tested is that the
* expected elements comprise a subset of the actual elements. If not ignoring repeated field
* order, but still ignoring extra repeated field elements, the actual elements must contain a
* subsequence that matches the expected elements for the test to pass. (The subsequence rule does
* not apply to Map fields, which are always compared by key.)
*/
MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsForValues();
/**
* Specifies that extra repeated field elements for these explicitly specified top-level field
* numbers should be ignored. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if their extra elements are to be ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElementsForValues()} instead to ignore these for all
* fields.
*
* @see #ignoringExtraRepeatedFieldElementsForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldsForValues(
int firstFieldNumber, int... rest);
/**
* Specifies that extra repeated field elements for these explicitly specified top-level field
* numbers should be ignored. Sub-fields must be specified explicitly (via {@link
* FieldDescriptor}) if their extra elements are to be ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElementsForValues()} instead to ignore these for all
* fields.
*
* @see #ignoringExtraRepeatedFieldElementsForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M> ignoringExtraRepeatedFieldElementsOfFieldsForValues(
Iterable<Integer> fieldNumbers);
/**
* Specifies that extra repeated field elements for these explicitly specified field descriptors
* should be ignored. Sub-fields must be specified explicitly if their extra elements are to be
* ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElementsForValues()} instead to ignore these for all
* fields.
*
* @see #ignoringExtraRepeatedFieldElementsForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M>
ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Specifies that extra repeated field elements for these explicitly specified field descriptors
* should be ignored. Sub-fields must be specified explicitly if their extra elements are to be
* ignored as well.
*
* <p>Use {@link #ignoringExtraRepeatedFieldElementsForValues()} instead to ignore these for all
* fields.
*
* @see #ignoringExtraRepeatedFieldElementsForValues() for details.
*/
MapWithProtoValuesFluentAssertion<M>
ignoringExtraRepeatedFieldElementsOfFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Compares double fields as equal if they are both finite and their absolute difference is less
* than or equal to {@code tolerance}.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForValues(double tolerance);
/**
* Compares double fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldsForValues(
double tolerance, int firstFieldNumber, int... rest);
/**
* Compares double fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldsForValues(
double tolerance, Iterable<Integer> fieldNumbers);
/**
* Compares double fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldDescriptorsForValues(
double tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Compares double fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingDoubleToleranceForFieldDescriptorsForValues(
double tolerance, Iterable<FieldDescriptor> fieldDescriptors);
/**
* Compares float fields as equal if they are both finite and their absolute difference is less
* than or equal to {@code tolerance}.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForValues(float tolerance);
/**
* Compares float fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldsForValues(
float tolerance, int firstFieldNumber, int... rest);
/**
* Compares float fields with these explicitly specified top-level field numbers using the
* provided absolute tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldsForValues(
float tolerance, Iterable<Integer> fieldNumbers);
/**
* Compares float fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldDescriptorsForValues(
float tolerance, FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Compares float fields with these explicitly specified fields using the provided absolute
* tolerance.
*
* @param tolerance A finite, non-negative tolerance.
*/
MapWithProtoValuesFluentAssertion<M> usingFloatToleranceForFieldDescriptorsForValues(
float tolerance, Iterable<FieldDescriptor> fieldDescriptors);
/**
* Limits the comparison of Protocol buffers to the fields set in the expected proto(s). When
* multiple protos are specified, the comparison is limited to the union of set fields in all the
* expected protos.
*
* <p>The "expected proto(s)" are those passed to the method at the end of the call chain, such as
* {@link #containsEntry} or {@link #containsExactlyEntriesIn}.
*
* <p>Fields not set in the expected proto(s) are ignored. In particular, proto3 fields which have
* their default values are ignored, as these are indistinguishable from unset fields. If you want
* to assert that a proto3 message has certain fields with default values, you cannot use this
* method.
*/
MapWithProtoValuesFluentAssertion<M> comparingExpectedFieldsOnlyForValues();
/**
* Limits the comparison of Protocol buffers to the defined {@link FieldScope}.
*
* <p>This method is additive and has well-defined ordering semantics. If the invoking {@link
* MapWithProtoValuesFluentAssertion} is already scoped to a {@link FieldScope} {@code X}, and
* this method is invoked with {@link FieldScope} {@code Y}, the resultant {@link
* MapWithProtoValuesFluentAssertion} is constrained to the intersection of {@link FieldScope}s
* {@code X} and {@code Y}.
*
* <p>By default, {@link MapWithProtoValuesFluentAssertion} is constrained to {@link
* FieldScopes#all()}, that is, no fields are excluded from comparison.
*/
MapWithProtoValuesFluentAssertion<M> withPartialScopeForValues(FieldScope fieldScope);
/**
* Excludes the top-level message fields with the given tag numbers from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* numbers are ignored, and all sub-messages of type {@code M} will also have these field numbers
* ignored.
*
* <p>If an invalid field number is supplied, the terminal comparison operation will throw a
* runtime exception.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldsForValues(int firstFieldNumber, int... rest);
/**
* Excludes the top-level message fields with the given tag numbers from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* numbers are ignored, and all sub-messages of type {@code M} will also have these field numbers
* ignored.
*
* <p>If an invalid field number is supplied, the terminal comparison operation will throw a
* runtime exception.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldsForValues(Iterable<Integer> fieldNumbers);
/**
* Excludes all message fields matching the given {@link FieldDescriptor}s from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* descriptors are ignored, no matter where they occur in the tree.
*
* <p>If a field descriptor which does not, or cannot, occur in the proto structure is supplied, it
* is silently ignored.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldDescriptorsForValues(
FieldDescriptor firstFieldDescriptor, FieldDescriptor... rest);
/**
* Excludes all message fields matching the given {@link FieldDescriptor}s from the comparison.
*
* <p>This method adds on any previous {@link FieldScope} related settings, overriding previous
* changes to ensure the specified fields are ignored recursively. All sub-fields of these field
* descriptors are ignored, no matter where they occur in the tree.
*
* <p>If a field descriptor which does not, or cannot, occur in the proto structure is supplied, it
* is silently ignored.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldDescriptorsForValues(
Iterable<FieldDescriptor> fieldDescriptors);
/**
* Excludes all specific field paths under the argument {@link FieldScope} from the comparison.
*
* <p>This method is additive and has well-defined ordering semantics. If the invoking {@link
* MapWithProtoValuesFluentAssertion} is already scoped to a {@link FieldScope} {@code X}, and
* this method is invoked with {@link FieldScope} {@code Y}, the resultant {@link
* MapWithProtoValuesFluentAssertion} is constrained to the subtraction of {@code X - Y}.
*
* <p>By default, {@link MapWithProtoValuesFluentAssertion} is constrained to {@link
* FieldScopes#all()}, that is, no fields are excluded from comparison.
*/
MapWithProtoValuesFluentAssertion<M> ignoringFieldScopeForValues(FieldScope fieldScope);
/**
* If set, in the event of a comparison failure, the error message printed will list only those
* specific fields that did not match between the actual and expected values. Useful for very
* large protocol buffers.
*
* <p>This is a purely cosmetic setting, and it has no effect on the behavior of the test.
*/
MapWithProtoValuesFluentAssertion<M> reportingMismatchesOnlyForValues();
/**
* Specifies the {@link TypeRegistry} and {@link ExtensionRegistry} to use for {@link
* com.google.protobuf.Any Any} messages.
*
* <p>To compare the value of an {@code Any} message, ProtoTruth looks in the given type registry
* for a descriptor for the message's type URL:
*
* <ul>
* <li>If ProtoTruth finds a descriptor, it unpacks the value and compares it against the
* expected value, respecting any configuration methods used for the assertion.
* <li>If ProtoTruth does not find a descriptor (or if the value can't be deserialized with the
* descriptor), it compares the raw, serialized bytes of the expected and actual values.
* </ul>
*
* <p>When ProtoTruth unpacks a value, it is parsing a serialized proto. That proto may contain
* extensions. To look up those extensions, ProtoTruth uses the provided {@link
* ExtensionRegistry}.
*
* @since 1.1
*/
MapWithProtoValuesFluentAssertion<M> unpackingAnyUsingForValues(
TypeRegistry typeRegistry, ExtensionRegistry extensionRegistry);
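// Illustrative usage sketch (hypothetical map and protos; assumes ProtoTruth's
// assertThat(Map) entry point returns this fluent assertion):
//
//   assertThat(actualMap)
//       .ignoringRepeatedFieldOrderForValues()
//       .ignoringFieldAbsenceForValues()
//       .containsExactlyEntriesIn(expectedMap);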
/**
* Fails if the map does not contain an entry with the given key and a value that corresponds to
* the given value.
*/
void containsEntry(@Nullable Object expectedKey, @Nullable M expectedValue);
/**
* Fails if the map contains an entry with the given key and a value that corresponds to the given
* value.
*/
void doesNotContainEntry(@Nullable Object excludedKey, @Nullable M excludedValue);
/**
* Fails if the map does not contain exactly the given set of keys mapping to values that
* correspond to the given values.
*
* <p>The values must all be of type {@code M}, and a {@link ClassCastException} will be thrown if
* any other type is encountered.
*
* <p><b>Warning:</b> the use of varargs means that we cannot guarantee an equal number of
* key/value pairs at compile time. Please make sure you provide varargs in key/value pairs!
*/
@CanIgnoreReturnValue
Ordered containsExactly(@Nullable Object k0, @Nullable M v0, @Nullable Object... rest);
/**
* Fails if the map does not contain exactly the keys in the given map, mapping to values that
* correspond to the values of the given map.
*/
@CanIgnoreReturnValue
Ordered containsExactlyEntriesIn(Map<?, ? extends M> expectedMap);
/**
* @deprecated Do not call {@code equals()} on a {@code MapWithProtoValuesFluentAssertion}.
* @see com.google.common.truth.Subject#equals(Object)
*/
@Override
@Deprecated
boolean equals(@Nullable Object o);
/**
* @deprecated {@code MapWithProtoValuesFluentAssertion} does not support {@code hashCode()}.
* @see com.google.common.truth.Subject#hashCode()
*/
@Override
@Deprecated
int hashCode();
}
|
MapWithProtoValuesFluentAssertion
|
java
|
hibernate__hibernate-orm
|
hibernate-core/src/test/java/org/hibernate/orm/test/query/SubQueryInFromIdClassTests.java
|
{
"start": 9078,
"end": 9932
}
|
class ____ {
private Integer id1;
private Integer id2;
private Name name;
private Contact alternativeContact;
public Contact() {
}
public Contact(Integer id, Name name) {
this.id1 = id;
this.id2 = id;
this.name = name;
}
@Id
public Integer getId1() {
return id1;
}
public void setId1(Integer id1) {
this.id1 = id1;
}
@Id
public Integer getId2() {
return id2;
}
public void setId2(Integer id2) {
this.id2 = id2;
}
public Name getName() {
return name;
}
public void setName(Name name) {
this.name = name;
}
@ManyToOne(fetch = FetchType.LAZY)
public Contact getAlternativeContact() {
return alternativeContact;
}
public void setAlternativeContact(Contact alternativeContact) {
this.alternativeContact = alternativeContact;
}
@Embeddable
public static
|
Contact
|
java
|
apache__kafka
|
metadata/src/test/java/org/apache/kafka/metadata/bootstrap/BootstrapMetadataTest.java
|
{
"start": 1529,
"end": 5699
}
|
class ____ {
static final List<ApiMessageAndVersion> SAMPLE_RECORDS1 = List.of(
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel((short) 8), (short) 0),
new ApiMessageAndVersion(new NoOpRecord(), (short) 0),
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel((short) 7), (short) 0));
@Test
public void testFromVersion() {
assertEquals(new BootstrapMetadata(List.of(
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel((short) 7), (short) 0)),
IBP_3_3_IV3.featureLevel(), "foo"),
BootstrapMetadata.fromVersion(IBP_3_3_IV3, "foo"));
}
@Test
public void testFromRecordsList() {
assertEquals(new BootstrapMetadata(SAMPLE_RECORDS1, IBP_3_3_IV3.featureLevel(), "bar"),
BootstrapMetadata.fromRecords(SAMPLE_RECORDS1, "bar"));
}
@Test
public void testFromRecordsListWithoutMetadataVersion() {
assertEquals("No FeatureLevelRecord for metadata.version was found in the bootstrap " +
"metadata from quux", assertThrows(RuntimeException.class,
() -> BootstrapMetadata.fromRecords(List.of(), "quux")).getMessage());
}
private static final ApiMessageAndVersion MV_10 =
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel((short) 10), (short) 0);
private static final ApiMessageAndVersion MV_11 =
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel((short) 11), (short) 0);
private static final ApiMessageAndVersion FOO_1 =
new ApiMessageAndVersion(new FeatureLevelRecord().
setName("foo").
setFeatureLevel((short) 1), (short) 0);
private static final ApiMessageAndVersion FOO_2 =
new ApiMessageAndVersion(new FeatureLevelRecord().
setName("foo").
setFeatureLevel((short) 2), (short) 0);
@Test
public void testCopyWithNewFeatureRecord() {
assertEquals(BootstrapMetadata.fromRecords(List.of(MV_10, FOO_1), "src"),
BootstrapMetadata.fromRecords(List.of(MV_10), "src").
copyWithFeatureRecord("foo", (short) 1));
}
@Test
public void testFeatureLevelForMetadataVersion() {
assertEquals((short) 11, BootstrapMetadata.
fromRecords(List.of(MV_10, MV_11), "src").
featureLevel(FEATURE_NAME));
}
@Test
public void testCopyWithModifiedFeatureRecord() {
assertEquals(BootstrapMetadata.fromRecords(List.of(MV_10, FOO_2), "src"),
BootstrapMetadata.fromRecords(List.of(MV_10, FOO_1), "src").
copyWithFeatureRecord("foo", (short) 2));
}
@Test
public void testFeatureLevelForFeatureThatIsNotSet() {
assertEquals((short) 0, BootstrapMetadata.
fromRecords(List.of(MV_10), "src").featureLevel("foo"));
}
@Test
public void testFeatureLevelForFeature() {
assertEquals((short) 2, BootstrapMetadata.
fromRecords(List.of(MV_10, FOO_2), "src").featureLevel("foo"));
}
static final List<ApiMessageAndVersion> RECORDS_WITH_OLD_METADATA_VERSION = List.of(
new ApiMessageAndVersion(new FeatureLevelRecord().
setName(FEATURE_NAME).
setFeatureLevel(MetadataVersionTestUtils.IBP_3_0_IV1_FEATURE_LEVEL), (short) 0));
@Test
public void testFromRecordsListWithOldMetadataVersion() {
BootstrapMetadata bootstrapMetadata = BootstrapMetadata.fromRecords(RECORDS_WITH_OLD_METADATA_VERSION, "quux");
assertEquals("No MetadataVersion with feature level 1. Valid feature levels are from " + MetadataVersion.MINIMUM_VERSION.featureLevel()
+ " to " + MetadataVersion.latestTesting().featureLevel() + ".",
assertThrows(RuntimeException.class, bootstrapMetadata::metadataVersion).getMessage());
}
}
|
BootstrapMetadataTest
|
java
|
apache__dubbo
|
dubbo-remoting/dubbo-remoting-http12/src/main/java/org/apache/dubbo/remoting/http12/h2/Http2ServerChannelObserver.java
|
{
"start": 1512,
"end": 3820
}
|
class ____ extends AbstractServerHttpChannelObserver<H2StreamChannel>
implements FlowControlStreamObserver<Object>, Http2CancelableStreamObserver<Object> {
private CancellationContext cancellationContext;
private StreamingDecoder streamingDecoder;
private boolean autoRequestN = true;
public Http2ServerChannelObserver(H2StreamChannel h2StreamChannel) {
super(h2StreamChannel);
}
public void setStreamingDecoder(StreamingDecoder streamingDecoder) {
this.streamingDecoder = streamingDecoder;
}
@Override
protected HttpMetadata encodeHttpMetadata(boolean endStream) {
HttpHeaders headers = new NettyHttpHeaders<>(new DefaultHttp2Headers(false, 8));
headers.set(HttpHeaderNames.TE.getKey(), HttpConstants.TRAILERS);
return new Http2MetadataFrame(headers, endStream);
}
@Override
protected HttpMetadata encodeTrailers(Throwable throwable) {
return new Http2MetadataFrame(new NettyHttpHeaders<>(new DefaultHttp2Headers(false, 4)), true);
}
@Override
public void setCancellationContext(CancellationContext cancellationContext) {
this.cancellationContext = cancellationContext;
}
@Override
public CancellationContext getCancellationContext() {
return cancellationContext;
}
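// Cancellation flow: a remote cancel first marks the stream closed, then the
// local CancellationContext (if any) is cancelled, and finally a reset frame is
// written carrying the error code from an ErrorCodeHolder throwable (0 otherwise).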
@Override
public void cancel(Throwable throwable) {
if (throwable instanceof CancelStreamException) {
if (((CancelStreamException) throwable).isCancelByRemote()) {
closed();
}
}
if (cancellationContext != null) {
cancellationContext.cancel(throwable);
}
long errorCode = 0;
if (throwable instanceof ErrorCodeHolder) {
errorCode = ((ErrorCodeHolder) throwable).getErrorCode();
}
getHttpChannel().writeResetFrame(errorCode);
}
@Override
public void request(int count) {
streamingDecoder.request(count);
}
@Override
public void disableAutoFlowControl() {
autoRequestN = false;
}
@Override
public boolean isAutoRequestN() {
return autoRequestN;
}
@Override
public void close() {
super.close();
streamingDecoder.onStreamClosed();
}
}
|
Http2ServerChannelObserver
|
java
|
google__guice
|
core/test/com/googlecode/guice/bundle/OSGiTestActivator.java
|
{
"start": 6311,
"end": 6727
}
|
class ____ implements C {
CD() {}
@Inject
public void setA(Undefined undefined) {}
@Inject
protected void setB(Undefined undefined) {}
@Inject
void setC(Undefined undefined) {}
@Inject
private void setD(Undefined undefined) {}
@Inject public Undefined a;
@Inject protected Undefined b;
@Inject Undefined c;
@Inject private Undefined d;
}
public static
|
CD
|
java
|
apache__maven
|
compat/maven-plugin-api/src/main/java/org/apache/maven/plugin/MojoNotFoundException.java
|
{
"start": 1052,
"end": 2480
}
|
class ____ extends Exception {
private String goal;
private PluginDescriptor pluginDescriptor;
public MojoNotFoundException(String goal, PluginDescriptor pluginDescriptor) {
super(toMessage(goal, pluginDescriptor));
this.goal = goal;
this.pluginDescriptor = pluginDescriptor;
}
public String getGoal() {
return goal;
}
public PluginDescriptor getPluginDescriptor() {
return pluginDescriptor;
}
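// toMessage produces output like (goal and plugin id here are illustrative):
//   Could not find goal 'run' in plugin org.example:demo-plugin:1.0 among available goals compile, test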
private static String toMessage(String goal, PluginDescriptor pluginDescriptor) {
StringBuilder buffer = new StringBuilder(256);
buffer.append("Could not find goal '").append(goal).append('\'');
if (pluginDescriptor != null) {
buffer.append(" in plugin ").append(pluginDescriptor.getId());
buffer.append(" among available goals ");
List<MojoDescriptor> mojos = pluginDescriptor.getMojos();
if (mojos != null) {
for (Iterator<MojoDescriptor> it = mojos.iterator(); it.hasNext(); ) {
MojoDescriptor mojo = it.next();
if (mojo != null) {
buffer.append(mojo.getGoal());
}
if (it.hasNext()) {
buffer.append(", ");
}
}
}
}
return buffer.toString();
}
}
|
MojoNotFoundException
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/impl/converter/Country.java
|
{
"start": 852,
"end": 1174
}
|
class ____ {
private String iso;
private String name;
public String getIso() {
return iso;
}
public void setIso(String iso) {
this.iso = iso;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
}
|
Country
|
java
|
apache__flink
|
flink-table/flink-sql-gateway/src/test/java/org/apache/flink/table/gateway/rest/SqlGatewayRestEndpointITCase.java
|
{
"start": 17510,
"end": 17724
}
|
class ____ implements RequestBody {
public final int id;
@JsonCreator
public TestRequest(@JsonProperty("id") int id) {
this.id = id;
}
}
private static
|
TestRequest
|
java
|
apache__camel
|
core/camel-core-model/src/main/java/org/apache/camel/model/dataformat/RssDataFormat.java
|
{
"start": 1429,
"end": 1850
}
|
class ____ extends DataFormatDefinition {
public RssDataFormat() {
super("rss");
}
protected RssDataFormat(RssDataFormat source) {
super(source);
}
@Override
public RssDataFormat copyDefinition() {
return new RssDataFormat(this);
}
/**
* {@code Builder} is a specific builder for {@link RssDataFormat}.
*/
@XmlTransient
public static
|
RssDataFormat
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/StatementSwitchToExpressionSwitchTest.java
|
{
"start": 26554,
"end": 26941
}
|
class ____ {
void foo(int value) {
switch (value) {
case 0 -> {}
default -> {}
}
}
}
""")
.doTest();
}
@Test
public void nonEmptyExpressionSwitchCases_noMatch() {
helper
.addSourceLines(
"Test.java",
"""
|
Test
|
java
|
apache__flink
|
flink-runtime/src/test/java/org/apache/flink/streaming/api/TypeFillTest.java
|
{
"start": 7667,
"end": 7740
}
|
class ____<T> extends AbstractTestSource<T> {}
private static
|
TestSource
|
java
|
apache__flink
|
flink-rpc/flink-rpc-akka/src/test/java/org/apache/flink/runtime/rpc/pekko/TimeoutCallStackTest.java
|
{
"start": 4755,
"end": 5215
}
|
class ____ extends RpcEndpoint implements TestingGateway {
TestingRpcEndpoint(RpcService rpcService, String endpointId) {
super(rpcService, endpointId);
}
@Override
public CompletableFuture<Void> callThatTimesOut(@RpcTimeout Duration timeout) {
// return a future that never completes, so the call is guaranteed to time out
return new CompletableFuture<>();
}
}
}
|
TestingRpcEndpoint
|
java
|
apache__hadoop
|
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
|
{
"start": 1708,
"end": 7425
}
|
class ____ {
public static final Logger LOG =
LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
private final Queue<ItemInfo> storageMovementNeeded =
new LinkedList<ItemInfo>();
/**
* Map of startPath and number of children. The number of children indicates
* the number of files pending to satisfy the policy.
*/
private final Map<Long, DirPendingWorkInfo> pendingWorkForDirectory =
new HashMap<>();
private final Context ctxt;
private Daemon pathIdCollector;
private SPSPathIdProcessor pathIDProcessor;
// Amount of time to cache the SUCCESS status of a path before turning it to
// NOT_AVAILABLE.
private static long statusClearanceElapsedTimeMs = 300000;
public BlockStorageMovementNeeded(Context context) {
this.ctxt = context;
pathIDProcessor = new SPSPathIdProcessor();
}
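// Tracking model: files needing movement queue up in storageMovementNeeded,
// while pendingWorkForDirectory counts outstanding children per start path so
// the SPS xAttr can be removed once a directory's subtree is fully processed.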
/**
* Add the candidate to the tracking list of items for which storage movement
* is expected, if necessary.
*
* @param trackInfo
* - track info for satisfying the policy
*/
public synchronized void add(ItemInfo trackInfo) {
if (trackInfo != null) {
storageMovementNeeded.add(trackInfo);
}
}
/**
* Add the itemInfo list to the tracking list of items for which storage
* movement is expected, if necessary.
*
* @param startPath
* - start path
* @param itemInfoList
* - List of children in the directory
* @param scanCompleted
* - Indicates whether the start id directory has no more elements to
* scan.
*/
@VisibleForTesting
public synchronized void addAll(long startPath, List<ItemInfo> itemInfoList,
boolean scanCompleted) {
storageMovementNeeded.addAll(itemInfoList);
updatePendingDirScanStats(startPath, itemInfoList.size(), scanCompleted);
}
/**
* Add the itemInfo to the tracking list of items for which storage movement is
* expected, if necessary.
*
* @param itemInfo
* - child in the directory
* @param scanCompleted
* - Indicates whether the ItemInfo start id directory has no more
* elements to scan.
*/
@VisibleForTesting
public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
storageMovementNeeded.add(itemInfo);
// This represents sps start id is file, so no need to update pending dir
// stats.
if (itemInfo.getStartPath() == itemInfo.getFile()) {
return;
}
updatePendingDirScanStats(itemInfo.getStartPath(), 1, scanCompleted);
}
private void updatePendingDirScanStats(long startPath, int numScannedFiles,
boolean scanCompleted) {
DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startPath);
if (pendingWork == null) {
pendingWork = new DirPendingWorkInfo();
pendingWorkForDirectory.put(startPath, pendingWork);
}
pendingWork.addPendingWorkCount(numScannedFiles);
if (scanCompleted) {
pendingWork.markScanCompleted();
}
}
/**
* Gets the satisfier files for which a block storage movement check is
* necessary, so that the movement can be made if required.
*
* @return satisfier files
*/
public synchronized ItemInfo get() {
return storageMovementNeeded.poll();
}
/**
* Returns queue size.
*/
public synchronized int size() {
return storageMovementNeeded.size();
}
public synchronized void clearAll() {
storageMovementNeeded.clear();
pendingWorkForDirectory.clear();
}
/**
* Decrease the pending child count for a directory once one file's blocks have
* been moved successfully. Remove the SPS xAttr if the pending child count is zero.
*/
public synchronized void removeItemTrackInfo(ItemInfo trackInfo,
boolean isSuccess) throws IOException {
if (trackInfo.isDir()) {
// If track is part of some start inode then reduce the pending
// directory work count.
long startId = trackInfo.getStartPath();
if (!ctxt.isFileExist(startId)) {
// directory deleted just remove it.
this.pendingWorkForDirectory.remove(startId);
} else {
DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
if (pendingWork != null) {
pendingWork.decrementPendingWorkCount();
if (pendingWork.isDirWorkDone()) {
ctxt.removeSPSHint(startId);
pendingWorkForDirectory.remove(startId);
}
}
}
} else {
// Remove xAttr if trackID doesn't exist in
// storageMovementAttemptedItems or file policy satisfied.
ctxt.removeSPSHint(trackInfo.getFile());
}
}
/**
* Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
* and notify so that required resources can be cleaned up.
*/
public synchronized void clearQueuesWithNotification() {
// Remove xAttr from directories
Long trackId;
while ((trackId = ctxt.getNextSPSPath()) != null) {
try {
// Remove xAttr for file
ctxt.removeSPSHint(trackId);
} catch (IOException ie) {
LOG.warn("Failed to remove SPS xattr for track id " + trackId, ie);
}
}
// Files directly added to storageMovementNeeded, so try to remove the
// xAttr for each file
ItemInfo itemInfo;
while ((itemInfo = get()) != null) {
try {
// Remove xAttr for file
if (!itemInfo.isDir()) {
ctxt.removeSPSHint(itemInfo.getFile());
}
} catch (IOException ie) {
LOG.warn(
"Failed to remove SPS xattr for track id "
+ itemInfo.getFile(), ie);
}
}
this.clearAll();
}
/**
* Take a dir track ID from the spsDirsToBeTraveresed queue and collect child
* IDs to process for satisfying the policy.
*/
private
|
BlockStorageMovementNeeded
|
java
|
elastic__elasticsearch
|
x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/core/Streams.java
|
{
"start": 545,
"end": 2443
}
|
class ____ {
/**
* Copy the contents of the given InputStream to the given OutputStream. Optionally, closes both streams when done.
*
* @param in the stream to copy from
* @param out the stream to copy to
* @param buffer buffer to use for copying
* @param close whether to close both streams after copying
* @return the number of bytes copied
* @throws IOException in case of I/O errors
*/
public static long copy(final InputStream in, final OutputStream out, byte[] buffer, boolean close) throws IOException {
Exception err = null;
try {
long byteCount = 0;
int bytesRead;
while ((bytesRead = in.read(buffer)) != -1) {
out.write(buffer, 0, bytesRead);
byteCount += bytesRead;
}
out.flush();
return byteCount;
} catch (IOException | RuntimeException e) {
err = e;
throw e;
} finally {
if (close) {
IOUtils.close(err, in, out);
}
}
}
private static byte[] buffer() {
return new byte[8 * 1024];
}
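// The overloads below delegate to the four-argument copy, defaulting to an
// 8 KiB scratch buffer and/or close=true.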
/**
* @see #copy(InputStream, OutputStream, byte[], boolean)
*/
public static long copy(final InputStream in, final OutputStream out, boolean close) throws IOException {
return copy(in, out, buffer(), close);
}
/**
* @see #copy(InputStream, OutputStream, byte[], boolean)
*/
public static long copy(final InputStream in, final OutputStream out, byte[] buffer) throws IOException {
return copy(in, out, buffer, true);
}
/**
* @see #copy(InputStream, OutputStream, byte[], boolean)
*/
public static long copy(final InputStream in, final OutputStream out) throws IOException {
return copy(in, out, buffer(), true);
}
}
|
Streams
|
java
|
apache__spark
|
examples/src/main/java/org/apache/spark/examples/ml/JavaPowerIterationClusteringExample.java
|
{
"start": 1329,
"end": 2467
}
|
class ____ {
public static void main(String[] args) {
// Create a SparkSession.
SparkSession spark = SparkSession
.builder()
.appName("JavaPowerIterationClustering")
.getOrCreate();
// $example on$
List<Row> data = Arrays.asList(
RowFactory.create(0L, 1L, 1.0),
RowFactory.create(0L, 2L, 1.0),
RowFactory.create(1L, 2L, 1.0),
RowFactory.create(3L, 4L, 1.0),
RowFactory.create(4L, 0L, 0.1)
);
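// The rows above encode a weighted similarity graph: vertices {0, 1, 2} are
// densely connected, while {3, 4} form a second group linked back to 0 only by
// a weak 0.1 edge, so clustering with k=2 should separate the two groups.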
StructType schema = new StructType(new StructField[]{
new StructField("src", DataTypes.LongType, false, Metadata.empty()),
new StructField("dst", DataTypes.LongType, false, Metadata.empty()),
new StructField("weight", DataTypes.DoubleType, false, Metadata.empty())
});
Dataset<Row> df = spark.createDataFrame(data, schema);
PowerIterationClustering model = new PowerIterationClustering()
.setK(2)
.setMaxIter(10)
.setInitMode("degree")
.setWeightCol("weight");
Dataset<Row> result = model.assignClusters(df);
result.show(false);
// $example off$
spark.stop();
}
}
|
JavaPowerIterationClusteringExample
|
java
|
netty__netty
|
codec-compression/src/main/java/io/netty/handler/codec/compression/CompressionOptions.java
|
{
"start": 935,
"end": 981
}
|
interface ____ {
// Empty
}
|
CompressionOptions
|
java
|
spring-projects__spring-security
|
core/src/main/java/org/springframework/security/concurrent/AbstractDelegatingSecurityContextSupport.java
|
{
"start": 1250,
"end": 2625
}
|
class ____ {
private SecurityContextHolderStrategy securityContextHolderStrategy = SecurityContextHolder
.getContextHolderStrategy();
private final @Nullable SecurityContext securityContext;
/**
* Creates a new {@link AbstractDelegatingSecurityContextSupport} that uses the
* specified {@link SecurityContext}.
* @param securityContext the {@link SecurityContext} to use for each
* {@link DelegatingSecurityContextRunnable} and each
* {@link DelegatingSecurityContextCallable} or null to default to the current
* {@link SecurityContext}.
*/
AbstractDelegatingSecurityContextSupport(@Nullable SecurityContext securityContext) {
this.securityContext = securityContext;
}
void setSecurityContextHolderStrategy(SecurityContextHolderStrategy securityContextHolderStrategy) {
Assert.notNull(securityContextHolderStrategy, "securityContextHolderStrategy cannot be null");
this.securityContextHolderStrategy = securityContextHolderStrategy;
}
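// The wrap(...) helpers below decorate a task so it executes with
// this.securityContext (or the caller's current context when null) established
// through the configured SecurityContextHolderStrategy.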
protected final Runnable wrap(Runnable delegate) {
return DelegatingSecurityContextRunnable.create(delegate, this.securityContext,
this.securityContextHolderStrategy);
}
protected final <T> Callable<T> wrap(Callable<T> delegate) {
return DelegatingSecurityContextCallable.create(delegate, this.securityContext,
this.securityContextHolderStrategy);
}
}
|
AbstractDelegatingSecurityContextSupport
|
java
|
apache__rocketmq
|
client/src/main/java/org/apache/rocketmq/client/impl/consumer/DefaultMQPushConsumerImpl.java
|
{
"start": 24708,
"end": 75299
}
|
class ____
);
try {
this.pullAPIWrapper.pullKernelImpl(
pullRequest.getMessageQueue(),
subExpression,
subscriptionData.getExpressionType(),
subscriptionData.getSubVersion(),
pullRequest.getNextOffset(),
this.defaultMQPushConsumer.getPullBatchSize(),
this.defaultMQPushConsumer.getPullBatchSizeInBytes(),
sysFlag,
commitOffsetValue,
BROKER_SUSPEND_MAX_TIME_MILLIS,
CONSUMER_TIMEOUT_MILLIS_WHEN_SUSPEND,
CommunicationMode.ASYNC,
pullCallback
);
} catch (Exception e) {
log.error("pullKernelImpl exception", e);
this.executePullRequestLater(pullRequest, pullTimeDelayMillsWhenException);
}
}
void popMessage(final PopRequest popRequest) {
final PopProcessQueue processQueue = popRequest.getPopProcessQueue();
if (processQueue.isDropped()) {
log.info("the pop request[{}] is dropped.", popRequest.toString());
return;
}
processQueue.setLastPopTimestamp(System.currentTimeMillis());
try {
this.makeSureStateOK();
} catch (MQClientException e) {
log.warn("popMessage exception, consumer state not ok", e);
this.executePopPullRequestLater(popRequest, pullTimeDelayMillsWhenException);
return;
}
if (this.isPause()) {
log.warn("consumer was paused, execute pull request later. instanceName={}, group={}", this.defaultMQPushConsumer.getInstanceName(), this.defaultMQPushConsumer.getConsumerGroup());
this.executePopPullRequestLater(popRequest, PULL_TIME_DELAY_MILLS_WHEN_SUSPEND);
return;
}
if (processQueue.getWaiAckMsgCount() > this.defaultMQPushConsumer.getPopThresholdForQueue()) {
this.executePopPullRequestLater(popRequest, PULL_TIME_DELAY_MILLS_WHEN_CACHE_FLOW_CONTROL);
if ((queueFlowControlTimes++ % 1000) == 0) {
log.warn("the messages waiting to ack exceeds the threshold {}, so do flow control, popRequest={}, flowControlTimes={}, wait count={}",
this.defaultMQPushConsumer.getPopThresholdForQueue(), popRequest, queueFlowControlTimes, processQueue.getWaiAckMsgCount());
}
return;
}
//POPTODO think of pop mode orderly implementation later.
final SubscriptionData subscriptionData = this.rebalanceImpl.getSubscriptionInner().get(popRequest.getMessageQueue().getTopic());
if (null == subscriptionData) {
this.executePopPullRequestLater(popRequest, pullTimeDelayMillsWhenException);
log.warn("find the consumer's subscription failed, {}", popRequest);
return;
}
final long beginTimestamp = System.currentTimeMillis();
PopCallback popCallback = new PopCallback() {
@Override
public void onSuccess(PopResult popResult) {
if (popResult == null) {
log.error("pop callback popResult is null");
DefaultMQPushConsumerImpl.this.executePopPullRequestImmediately(popRequest);
return;
}
processPopResult(popResult, subscriptionData);
switch (popResult.getPopStatus()) {
case FOUND:
long pullRT = System.currentTimeMillis() - beginTimestamp;
DefaultMQPushConsumerImpl.this.getConsumerStatsManager().incPullRT(popRequest.getConsumerGroup(),
popRequest.getMessageQueue().getTopic(), pullRT);
if (popResult.getMsgFoundList() == null || popResult.getMsgFoundList().isEmpty()) {
DefaultMQPushConsumerImpl.this.executePopPullRequestImmediately(popRequest);
} else {
DefaultMQPushConsumerImpl.this.getConsumerStatsManager().incPullTPS(popRequest.getConsumerGroup(),
popRequest.getMessageQueue().getTopic(), popResult.getMsgFoundList().size());
popRequest.getPopProcessQueue().incFoundMsg(popResult.getMsgFoundList().size());
DefaultMQPushConsumerImpl.this.consumeMessagePopService.submitPopConsumeRequest(
popResult.getMsgFoundList(),
processQueue,
popRequest.getMessageQueue());
if (DefaultMQPushConsumerImpl.this.defaultMQPushConsumer.getPullInterval() > 0) {
DefaultMQPushConsumerImpl.this.executePopPullRequestLater(popRequest,
DefaultMQPushConsumerImpl.this.defaultMQPushConsumer.getPullInterval());
} else {
DefaultMQPushConsumerImpl.this.executePopPullRequestImmediately(popRequest);
}
}
break;
case NO_NEW_MSG:
case POLLING_NOT_FOUND:
DefaultMQPushConsumerImpl.this.executePopPullRequestImmediately(popRequest);
break;
case POLLING_FULL:
default:
DefaultMQPushConsumerImpl.this.executePopPullRequestLater(popRequest, pullTimeDelayMillsWhenException);
break;
}
}
@Override
public void onException(Throwable e) {
if (!popRequest.getMessageQueue().getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
log.warn("execute the pull request exception: {}", e);
}
if (e instanceof MQBrokerException && ((MQBrokerException) e).getResponseCode() == ResponseCode.FLOW_CONTROL) {
DefaultMQPushConsumerImpl.this.executePopPullRequestLater(popRequest, PULL_TIME_DELAY_MILLS_WHEN_BROKER_FLOW_CONTROL);
} else {
DefaultMQPushConsumerImpl.this.executePopPullRequestLater(popRequest, pullTimeDelayMillsWhenException);
}
}
};
try {
long invisibleTime = this.defaultMQPushConsumer.getPopInvisibleTime();
if (invisibleTime < MIN_POP_INVISIBLE_TIME || invisibleTime > MAX_POP_INVISIBLE_TIME) {
invisibleTime = 60000;
}
this.pullAPIWrapper.popAsync(popRequest.getMessageQueue(), invisibleTime, this.defaultMQPushConsumer.getPopBatchNums(),
popRequest.getConsumerGroup(), BROKER_SUSPEND_MAX_TIME_MILLIS, popCallback, true, popRequest.getInitMode(),
false, subscriptionData.getExpressionType(), subscriptionData.getSubString());
} catch (Exception e) {
log.error("popAsync exception", e);
this.executePopPullRequestLater(popRequest, pullTimeDelayMillsWhenException);
}
}
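// Pop post-processing (see processPopResult below): re-apply tag filtering on
// the client, run registered FilterMessageHooks, drop messages past the
// reconsume limit, and ack anything filtered out so the broker won't redeliver it.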
private PopResult processPopResult(final PopResult popResult, final SubscriptionData subscriptionData) {
if (PopStatus.FOUND == popResult.getPopStatus()) {
List<MessageExt> msgFoundList = popResult.getMsgFoundList();
List<MessageExt> msgListFilterAgain = new ArrayList<>(popResult.getMsgFoundList().size());
if (!subscriptionData.getTagsSet().isEmpty() && !subscriptionData.isClassFilterMode()
&& popResult.getMsgFoundList().size() > 0) {
for (MessageExt msg : popResult.getMsgFoundList()) {
if (msg.getTags() != null) {
if (subscriptionData.getTagsSet().contains(msg.getTags())) {
msgListFilterAgain.add(msg);
}
}
}
} else {
msgListFilterAgain.addAll(msgFoundList);
}
if (!this.filterMessageHookList.isEmpty()) {
FilterMessageContext filterMessageContext = new FilterMessageContext();
filterMessageContext.setUnitMode(this.defaultMQPushConsumer.isUnitMode());
filterMessageContext.setMsgList(msgListFilterAgain);
if (!this.filterMessageHookList.isEmpty()) {
for (FilterMessageHook hook : this.filterMessageHookList) {
try {
hook.filterMessage(filterMessageContext);
} catch (Throwable e) {
log.error("execute hook error. hookName={}", hook.hookName());
}
}
}
}
Iterator<MessageExt> iterator = msgListFilterAgain.iterator();
while (iterator.hasNext()) {
MessageExt msg = iterator.next();
if (msg.getReconsumeTimes() > getMaxReconsumeTimes()) {
iterator.remove();
log.info("Reconsume times has reached {}, so ack msg={}", msg.getReconsumeTimes(), msg);
}
}
if (msgFoundList.size() != msgListFilterAgain.size()) {
for (MessageExt msg : msgFoundList) {
if (!msgListFilterAgain.contains(msg)) {
ackAsync(msg, this.groupName());
}
}
}
popResult.setMsgFoundList(msgListFilterAgain);
}
return popResult;
}
private void makeSureStateOK() throws MQClientException {
if (this.serviceState != ServiceState.RUNNING) {
throw new MQClientException("The consumer service state not OK, "
+ this.serviceState
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_SERVICE_NOT_OK),
null);
}
}
void executePullRequestLater(final PullRequest pullRequest, final long timeDelay) {
this.mQClientFactory.getPullMessageService().executePullRequestLater(pullRequest, timeDelay);
}
public boolean isPause() {
return pause;
}
public void setPause(boolean pause) {
this.pause = pause;
}
public ConsumerStatsManager getConsumerStatsManager() {
return this.mQClientFactory.getConsumerStatsManager();
}
public void executePullRequestImmediately(final PullRequest pullRequest) {
this.mQClientFactory.getPullMessageService().executePullRequestImmediately(pullRequest);
}
void executePopPullRequestLater(final PopRequest pullRequest, final long timeDelay) {
this.mQClientFactory.getPullMessageService().executePopPullRequestLater(pullRequest, timeDelay);
}
void executePopPullRequestImmediately(final PopRequest pullRequest) {
this.mQClientFactory.getPullMessageService().executePopPullRequestImmediately(pullRequest);
}
private void correctTagsOffset(final PullRequest pullRequest) {
if (0L == pullRequest.getProcessQueue().getMsgCount().get()) {
this.offsetStore.updateOffset(pullRequest.getMessageQueue(), pullRequest.getNextOffset(), true);
}
}
public void executeTaskLater(final Runnable r, final long timeDelay) {
this.mQClientFactory.getPullMessageService().executeTaskLater(r, timeDelay);
}
public void executeTask(final Runnable r) {
this.mQClientFactory.getPullMessageService().executeTask(r);
}
public QueryResult queryMessage(String topic, String key, int maxNum, long begin, long end)
throws MQClientException, InterruptedException {
return this.mQClientFactory.getMQAdminImpl().queryMessage(topic, key, maxNum, begin, end);
}
public MessageExt queryMessageByUniqKey(String topic, String uniqKey) throws MQClientException,
InterruptedException {
return this.mQClientFactory.getMQAdminImpl().queryMessageByUniqKey(topic, uniqKey);
}
public void registerMessageListener(MessageListener messageListener) {
this.messageListenerInner = messageListener;
}
public void resume() {
this.pause = false;
doRebalance();
log.info("resume this consumer, {}", this.defaultMQPushConsumer.getConsumerGroup());
}
@Deprecated
public void sendMessageBack(MessageExt msg, int delayLevel, final String brokerName)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
sendMessageBack(msg, delayLevel, brokerName, null);
}
public void sendMessageBack(MessageExt msg, int delayLevel, final MessageQueue mq)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
sendMessageBack(msg, delayLevel, msg.getBrokerName(), mq);
}
private void sendMessageBack(MessageExt msg, int delayLevel, final String brokerName, final MessageQueue mq)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
boolean needRetry = true;
try {
if (brokerName != null && brokerName.startsWith(MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX)
|| mq != null && mq.getBrokerName().startsWith(MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX)) {
needRetry = false;
sendMessageBackAsNormalMessage(msg);
} else {
String brokerAddr = (null != brokerName) ? this.mQClientFactory.findBrokerAddressInPublish(brokerName)
: RemotingHelper.parseSocketAddressAddr(msg.getStoreHost());
if (UtilAll.isBlank(brokerAddr)) {
throw new MQClientException("Broker[" + brokerName + "] master node does not exist", null);
}
this.mQClientFactory.getMQClientAPIImpl().consumerSendMessageBack(brokerAddr, brokerName, msg,
this.defaultMQPushConsumer.getConsumerGroup(), delayLevel, 5000, getMaxReconsumeTimes());
}
} catch (Throwable t) {
log.error("Failed to send message back, consumerGroup={}, brokerName={}, mq={}, message={}",
this.defaultMQPushConsumer.getConsumerGroup(), brokerName, mq, msg, t);
if (needRetry) {
sendMessageBackAsNormalMessage(msg);
}
} finally {
msg.setTopic(NamespaceUtil.withoutNamespace(msg.getTopic(), this.defaultMQPushConsumer.getNamespace()));
}
}
private void sendMessageBackAsNormalMessage(MessageExt msg) throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
Message newMsg = new Message(MixAll.getRetryTopic(this.defaultMQPushConsumer.getConsumerGroup()), msg.getBody());
MessageAccessor.setProperties(newMsg, msg.getProperties());
String originMsgId = MessageAccessor.getOriginMessageId(msg);
MessageAccessor.setOriginMessageId(newMsg, UtilAll.isBlank(originMsgId) ? msg.getMsgId() : originMsgId);
newMsg.setFlag(msg.getFlag());
MessageAccessor.putProperty(newMsg, MessageConst.PROPERTY_RETRY_TOPIC, msg.getTopic());
MessageAccessor.setReconsumeTime(newMsg, String.valueOf(msg.getReconsumeTimes() + 1));
MessageAccessor.setMaxReconsumeTimes(newMsg, String.valueOf(getMaxReconsumeTimes()));
MessageAccessor.clearProperty(newMsg, MessageConst.PROPERTY_TRANSACTION_PREPARED);
newMsg.setDelayTimeLevel(3 + msg.getReconsumeTimes());
this.mQClientFactory.getDefaultMQProducer().send(newMsg);
}
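// ackAsync resolves the target broker from the pop check-point extra info
// (PROPERTY_POP_CK), retrying the lookup once after a route refresh, then sends
// an asynchronous ACK; failures are only logged, not retried.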
void ackAsync(MessageExt message, String consumerGroup) {
final String extraInfo = message.getProperty(MessageConst.PROPERTY_POP_CK);
try {
String[] extraInfoStrs = ExtraInfoUtil.split(extraInfo);
String brokerName = ExtraInfoUtil.getBrokerName(extraInfoStrs);
int queueId = ExtraInfoUtil.getQueueId(extraInfoStrs);
long queueOffset = ExtraInfoUtil.getQueueOffset(extraInfoStrs);
String topic = message.getTopic();
String desBrokerName = brokerName;
if (brokerName != null && brokerName.startsWith(MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX)) {
desBrokerName = this.mQClientFactory.getBrokerNameFromMessageQueue(this.defaultMQPushConsumer.queueWithNamespace(new MessageQueue(topic, brokerName, queueId)));
}
FindBrokerResult
findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(desBrokerName, MixAll.MASTER_ID, true);
if (null == findBrokerResult) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic);
findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(desBrokerName, MixAll.MASTER_ID, true);
}
if (findBrokerResult == null) {
log.error("The broker[" + desBrokerName + "] not exist");
return;
}
AckMessageRequestHeader requestHeader = new AckMessageRequestHeader();
requestHeader.setTopic(ExtraInfoUtil.getRealTopic(extraInfoStrs, topic, consumerGroup));
requestHeader.setQueueId(queueId);
requestHeader.setOffset(queueOffset);
requestHeader.setConsumerGroup(consumerGroup);
requestHeader.setExtraInfo(extraInfo);
requestHeader.setBrokerName(brokerName);
this.mQClientFactory.getMQClientAPIImpl().ackMessageAsync(findBrokerResult.getBrokerAddr(), ASYNC_TIMEOUT, new AckCallback() {
@Override
public void onSuccess(AckResult ackResult) {
if (ackResult != null && !AckStatus.OK.equals(ackResult.getStatus())) {
log.warn("Ack message fail. ackResult: {}, extraInfo: {}", ackResult, extraInfo);
}
}
@Override
public void onException(Throwable e) {
log.warn("Ack message fail. extraInfo: {} error message: {}", extraInfo, e.toString());
}
}, requestHeader);
} catch (Throwable t) {
log.error("ack async error.", t);
}
}
void changePopInvisibleTimeAsync(String topic, String consumerGroup, String extraInfo, long invisibleTime, AckCallback callback)
throws MQClientException, RemotingException, InterruptedException, MQBrokerException {
String[] extraInfoStrs = ExtraInfoUtil.split(extraInfo);
String brokerName = ExtraInfoUtil.getBrokerName(extraInfoStrs);
int queueId = ExtraInfoUtil.getQueueId(extraInfoStrs);
String desBrokerName = brokerName;
if (brokerName != null && brokerName.startsWith(MixAll.LOGICAL_QUEUE_MOCK_BROKER_PREFIX)) {
desBrokerName = this.mQClientFactory.getBrokerNameFromMessageQueue(this.defaultMQPushConsumer.queueWithNamespace(new MessageQueue(topic, brokerName, queueId)));
}
FindBrokerResult
findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(desBrokerName, MixAll.MASTER_ID, true);
if (null == findBrokerResult) {
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic);
findBrokerResult = this.mQClientFactory.findBrokerAddressInSubscribe(desBrokerName, MixAll.MASTER_ID, true);
}
if (findBrokerResult != null) {
ChangeInvisibleTimeRequestHeader requestHeader = new ChangeInvisibleTimeRequestHeader();
requestHeader.setTopic(ExtraInfoUtil.getRealTopic(extraInfoStrs, topic, consumerGroup));
requestHeader.setQueueId(queueId);
requestHeader.setOffset(ExtraInfoUtil.getQueueOffset(extraInfoStrs));
requestHeader.setConsumerGroup(consumerGroup);
requestHeader.setExtraInfo(extraInfo);
requestHeader.setInvisibleTime(invisibleTime);
requestHeader.setBrokerName(brokerName);
            // note: the request header keeps the original broker name, while the call itself goes to the resolved broker address
this.mQClientFactory.getMQClientAPIImpl().changeInvisibleTimeAsync(brokerName, findBrokerResult.getBrokerAddr(), requestHeader, ASYNC_TIMEOUT, callback);
return;
}
throw new MQClientException("The broker[" + desBrokerName + "] not exist", null);
}
public int getMaxReconsumeTimes() {
// default reconsume times: 16
if (this.defaultMQPushConsumer.getMaxReconsumeTimes() == -1) {
return 16;
} else {
return this.defaultMQPushConsumer.getMaxReconsumeTimes();
}
}
public void shutdown() {
shutdown(0);
}
public synchronized void shutdown(long awaitTerminateMillis) {
switch (this.serviceState) {
case CREATE_JUST:
break;
case RUNNING:
this.consumeMessageService.shutdown(awaitTerminateMillis);
this.persistConsumerOffset();
this.mQClientFactory.unregisterConsumer(this.defaultMQPushConsumer.getConsumerGroup());
this.mQClientFactory.shutdown();
log.info("the consumer [{}] shutdown OK", this.defaultMQPushConsumer.getConsumerGroup());
this.rebalanceImpl.destroy();
this.serviceState = ServiceState.SHUTDOWN_ALREADY;
break;
case SHUTDOWN_ALREADY:
break;
default:
break;
}
}
public synchronized void start() throws MQClientException {
switch (this.serviceState) {
case CREATE_JUST:
log.info("the consumer [{}] start beginning. messageModel={}, isUnitMode={}", this.defaultMQPushConsumer.getConsumerGroup(),
this.defaultMQPushConsumer.getMessageModel(), this.defaultMQPushConsumer.isUnitMode());
this.serviceState = ServiceState.START_FAILED;
this.checkConfig();
this.copySubscription();
if (this.defaultMQPushConsumer.getMessageModel() == MessageModel.CLUSTERING) {
this.defaultMQPushConsumer.changeInstanceNameToPID();
}
this.mQClientFactory = MQClientManager.getInstance().getOrCreateMQClientInstance(this.defaultMQPushConsumer, this.rpcHook);
this.rebalanceImpl.setConsumerGroup(this.defaultMQPushConsumer.getConsumerGroup());
this.rebalanceImpl.setMessageModel(this.defaultMQPushConsumer.getMessageModel());
this.rebalanceImpl.setAllocateMessageQueueStrategy(this.defaultMQPushConsumer.getAllocateMessageQueueStrategy());
this.rebalanceImpl.setmQClientFactory(this.mQClientFactory);
if (this.pullAPIWrapper == null) {
this.pullAPIWrapper = new PullAPIWrapper(
mQClientFactory,
this.defaultMQPushConsumer.getConsumerGroup(), isUnitMode());
}
this.pullAPIWrapper.registerFilterMessageHook(filterMessageHookList);
if (this.defaultMQPushConsumer.getOffsetStore() != null) {
this.offsetStore = this.defaultMQPushConsumer.getOffsetStore();
} else {
switch (this.defaultMQPushConsumer.getMessageModel()) {
case BROADCASTING:
this.offsetStore = new LocalFileOffsetStore(this.mQClientFactory, this.defaultMQPushConsumer.getConsumerGroup());
break;
case CLUSTERING:
this.offsetStore = new RemoteBrokerOffsetStore(this.mQClientFactory, this.defaultMQPushConsumer.getConsumerGroup());
break;
default:
break;
}
this.defaultMQPushConsumer.setOffsetStore(this.offsetStore);
}
this.offsetStore.load();
if (this.getMessageListenerInner() instanceof MessageListenerOrderly) {
this.consumeOrderly = true;
this.consumeMessageService =
new ConsumeMessageOrderlyService(this, (MessageListenerOrderly) this.getMessageListenerInner());
//POPTODO reuse Executor ?
this.consumeMessagePopService = new ConsumeMessagePopOrderlyService(this, (MessageListenerOrderly) this.getMessageListenerInner());
} else if (this.getMessageListenerInner() instanceof MessageListenerConcurrently) {
this.consumeOrderly = false;
this.consumeMessageService =
new ConsumeMessageConcurrentlyService(this, (MessageListenerConcurrently) this.getMessageListenerInner());
//POPTODO reuse Executor ?
this.consumeMessagePopService =
new ConsumeMessagePopConcurrentlyService(this, (MessageListenerConcurrently) this.getMessageListenerInner());
}
this.consumeMessageService.start();
// POPTODO
this.consumeMessagePopService.start();
boolean registerOK = mQClientFactory.registerConsumer(this.defaultMQPushConsumer.getConsumerGroup(), this);
if (!registerOK) {
this.serviceState = ServiceState.CREATE_JUST;
this.consumeMessageService.shutdown(defaultMQPushConsumer.getAwaitTerminationMillisWhenShutdown());
throw new MQClientException("The consumer group[" + this.defaultMQPushConsumer.getConsumerGroup()
+ "] has been created before, specify another name please." + FAQUrl.suggestTodo(FAQUrl.GROUP_NAME_DUPLICATE_URL),
null);
}
mQClientFactory.start();
log.info("the consumer [{}] start OK.", this.defaultMQPushConsumer.getConsumerGroup());
this.serviceState = ServiceState.RUNNING;
break;
case RUNNING:
case START_FAILED:
case SHUTDOWN_ALREADY:
throw new MQClientException("The PushConsumer service state not OK, maybe started once, "
+ this.serviceState
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_SERVICE_NOT_OK),
null);
default:
break;
}
try {
this.updateTopicSubscribeInfoWhenSubscriptionChanged();
this.mQClientFactory.checkClientInBroker();
if (this.mQClientFactory.sendHeartbeatToAllBrokerWithLock()) {
this.mQClientFactory.rebalanceImmediately();
}
} catch (Exception e) {
log.warn("Start the consumer {} fail.", this.defaultMQPushConsumer.getConsumerGroup(), e);
shutdown();
throw e;
}
}
private void checkConfig() throws MQClientException {
        if (null == this.defaultMQPushConsumer.getConsumerGroup()) {
            throw new MQClientException(
                "consumerGroup is null"
                    + FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
                null);
        }
        Validators.checkGroup(this.defaultMQPushConsumer.getConsumerGroup());
if (this.defaultMQPushConsumer.getConsumerGroup().equals(MixAll.DEFAULT_CONSUMER_GROUP)) {
throw new MQClientException(
"consumerGroup can not equal "
+ MixAll.DEFAULT_CONSUMER_GROUP
+ ", please specify another one."
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
if (null == this.defaultMQPushConsumer.getMessageModel()) {
throw new MQClientException(
"messageModel is null"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
if (null == this.defaultMQPushConsumer.getConsumeFromWhere()) {
throw new MQClientException(
"consumeFromWhere is null"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
Date dt = UtilAll.parseDate(this.defaultMQPushConsumer.getConsumeTimestamp(), UtilAll.YYYYMMDDHHMMSS);
if (null == dt) {
throw new MQClientException(
"consumeTimestamp is invalid, the valid format is yyyyMMddHHmmss,but received "
+ this.defaultMQPushConsumer.getConsumeTimestamp()
+ " " + FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL), null);
}
// allocateMessageQueueStrategy
if (null == this.defaultMQPushConsumer.getAllocateMessageQueueStrategy()) {
throw new MQClientException(
"allocateMessageQueueStrategy is null"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// subscription
if (null == this.defaultMQPushConsumer.getSubscription()) {
throw new MQClientException(
"subscription is null"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// messageListener
if (null == this.defaultMQPushConsumer.getMessageListener()) {
throw new MQClientException(
"messageListener is null"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
boolean orderly = this.defaultMQPushConsumer.getMessageListener() instanceof MessageListenerOrderly;
boolean concurrently = this.defaultMQPushConsumer.getMessageListener() instanceof MessageListenerConcurrently;
if (!orderly && !concurrently) {
throw new MQClientException(
"messageListener must be instanceof MessageListenerOrderly or MessageListenerConcurrently"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// consumeThreadMin
if (this.defaultMQPushConsumer.getConsumeThreadMin() < 1
|| this.defaultMQPushConsumer.getConsumeThreadMin() > 1000) {
throw new MQClientException(
"consumeThreadMin Out of range [1, 1000]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// consumeThreadMax
if (this.defaultMQPushConsumer.getConsumeThreadMax() < 1 || this.defaultMQPushConsumer.getConsumeThreadMax() > 1000) {
throw new MQClientException(
"consumeThreadMax Out of range [1, 1000]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// consumeThreadMin can't be larger than consumeThreadMax
if (this.defaultMQPushConsumer.getConsumeThreadMin() > this.defaultMQPushConsumer.getConsumeThreadMax()) {
throw new MQClientException(
"consumeThreadMin (" + this.defaultMQPushConsumer.getConsumeThreadMin() + ") "
+ "is larger than consumeThreadMax (" + this.defaultMQPushConsumer.getConsumeThreadMax() + ")",
null);
}
// consumeConcurrentlyMaxSpan
if (this.defaultMQPushConsumer.getConsumeConcurrentlyMaxSpan() < 1
|| this.defaultMQPushConsumer.getConsumeConcurrentlyMaxSpan() > 65535) {
throw new MQClientException(
"consumeConcurrentlyMaxSpan Out of range [1, 65535]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// pullThresholdForQueue
if (this.defaultMQPushConsumer.getPullThresholdForQueue() < 1 || this.defaultMQPushConsumer.getPullThresholdForQueue() > 65535) {
throw new MQClientException(
"pullThresholdForQueue Out of range [1, 65535]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// pullThresholdForTopic
if (this.defaultMQPushConsumer.getPullThresholdForTopic() != -1) {
if (this.defaultMQPushConsumer.getPullThresholdForTopic() < 1 || this.defaultMQPushConsumer.getPullThresholdForTopic() > 6553500) {
throw new MQClientException(
"pullThresholdForTopic Out of range [1, 6553500]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
}
// pullThresholdSizeForQueue
if (this.defaultMQPushConsumer.getPullThresholdSizeForQueue() < 1 || this.defaultMQPushConsumer.getPullThresholdSizeForQueue() > 1024) {
throw new MQClientException(
"pullThresholdSizeForQueue Out of range [1, 1024]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
if (this.defaultMQPushConsumer.getPullThresholdSizeForTopic() != -1) {
// pullThresholdSizeForTopic
if (this.defaultMQPushConsumer.getPullThresholdSizeForTopic() < 1 || this.defaultMQPushConsumer.getPullThresholdSizeForTopic() > 102400) {
throw new MQClientException(
"pullThresholdSizeForTopic Out of range [1, 102400]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
}
// pullInterval
if (this.defaultMQPushConsumer.getPullInterval() < 0 || this.defaultMQPushConsumer.getPullInterval() > 65535) {
throw new MQClientException(
"pullInterval Out of range [0, 65535]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// consumeMessageBatchMaxSize
if (this.defaultMQPushConsumer.getConsumeMessageBatchMaxSize() < 1
|| this.defaultMQPushConsumer.getConsumeMessageBatchMaxSize() > 1024) {
throw new MQClientException(
"consumeMessageBatchMaxSize Out of range [1, 1024]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// pullBatchSize
if (this.defaultMQPushConsumer.getPullBatchSize() < 1 || this.defaultMQPushConsumer.getPullBatchSize() > 1024) {
throw new MQClientException(
"pullBatchSize Out of range [1, 1024]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// popInvisibleTime
if (this.defaultMQPushConsumer.getPopInvisibleTime() < MIN_POP_INVISIBLE_TIME
|| this.defaultMQPushConsumer.getPopInvisibleTime() > MAX_POP_INVISIBLE_TIME) {
throw new MQClientException(
"popInvisibleTime Out of range [" + MIN_POP_INVISIBLE_TIME + ", " + MAX_POP_INVISIBLE_TIME + "]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
// popBatchNums
if (this.defaultMQPushConsumer.getPopBatchNums() <= 0 || this.defaultMQPushConsumer.getPopBatchNums() > 32) {
throw new MQClientException(
"popBatchNums Out of range [1, 32]"
+ FAQUrl.suggestTodo(FAQUrl.CLIENT_PARAMETER_CHECK_URL),
null);
}
}
private void copySubscription() throws MQClientException {
try {
Map<String, String> sub = this.defaultMQPushConsumer.getSubscription();
if (sub != null) {
for (final Map.Entry<String, String> entry : sub.entrySet()) {
final String topic = entry.getKey();
final String subString = entry.getValue();
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(topic, subString);
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
}
}
if (null == this.messageListenerInner) {
this.messageListenerInner = this.defaultMQPushConsumer.getMessageListener();
}
switch (this.defaultMQPushConsumer.getMessageModel()) {
case BROADCASTING:
break;
case CLUSTERING:
final String retryTopic = MixAll.getRetryTopic(this.defaultMQPushConsumer.getConsumerGroup());
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(retryTopic, SubscriptionData.SUB_ALL);
this.rebalanceImpl.getSubscriptionInner().put(retryTopic, subscriptionData);
break;
default:
break;
}
} catch (Exception e) {
throw new MQClientException("subscription exception", e);
}
}
public MessageListener getMessageListenerInner() {
return messageListenerInner;
}
private void updateTopicSubscribeInfoWhenSubscriptionChanged() {
if (doNotUpdateTopicSubscribeInfoWhenSubscriptionChanged) {
return;
}
Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
if (subTable != null) {
for (final Map.Entry<String, SubscriptionData> entry : subTable.entrySet()) {
final String topic = entry.getKey();
this.mQClientFactory.updateTopicRouteInfoFromNameServer(topic);
}
}
}
public ConcurrentMap<String, SubscriptionData> getSubscriptionInner() {
return this.rebalanceImpl.getSubscriptionInner();
}
public void subscribe(String topic, String subExpression) throws MQClientException {
try {
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(topic, subExpression);
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
if (this.mQClientFactory != null) {
this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
}
} catch (Exception e) {
throw new MQClientException("subscription exception", e);
}
}
public void subscribe(String topic, String fullClassName, String filterClassSource) throws MQClientException {
try {
SubscriptionData subscriptionData = FilterAPI.buildSubscriptionData(topic, SubscriptionData.SUB_ALL);
subscriptionData.setSubString(fullClassName);
subscriptionData.setClassFilterMode(true);
subscriptionData.setFilterClassSource(filterClassSource);
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
if (this.mQClientFactory != null) {
this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
}
} catch (Exception e) {
throw new MQClientException("subscription exception", e);
}
}
public void subscribe(final String topic, final MessageSelector messageSelector) throws MQClientException {
try {
if (messageSelector == null) {
subscribe(topic, SubscriptionData.SUB_ALL);
return;
}
SubscriptionData subscriptionData = FilterAPI.build(topic,
messageSelector.getExpression(), messageSelector.getExpressionType());
this.rebalanceImpl.getSubscriptionInner().put(topic, subscriptionData);
if (this.mQClientFactory != null) {
this.mQClientFactory.sendHeartbeatToAllBrokerWithLock();
}
} catch (Exception e) {
throw new MQClientException("subscription exception", e);
}
}
public void suspend() {
this.pause = true;
log.info("suspend this consumer, {}", this.defaultMQPushConsumer.getConsumerGroup());
}
public void unsubscribe(String topic) {
this.rebalanceImpl.getSubscriptionInner().remove(topic);
}
public void updateConsumeOffset(MessageQueue mq, long offset) {
this.offsetStore.updateOffset(mq, offset, false);
}
public void updateCorePoolSize(int corePoolSize) {
this.consumeMessageService.updateCorePoolSize(corePoolSize);
}
public MessageExt viewMessage(String topic, String msgId)
throws RemotingException, MQBrokerException, InterruptedException, MQClientException {
return this.mQClientFactory.getMQAdminImpl().viewMessage(topic, msgId);
}
public RebalanceImpl getRebalanceImpl() {
return rebalanceImpl;
}
public boolean isConsumeOrderly() {
return consumeOrderly;
}
public void setConsumeOrderly(boolean consumeOrderly) {
this.consumeOrderly = consumeOrderly;
}
public void resetOffsetByTimeStamp(long timeStamp) throws MQClientException {
for (String topic : rebalanceImpl.getSubscriptionInner().keySet()) {
Set<MessageQueue> mqs = rebalanceImpl.getTopicSubscribeInfoTable().get(topic);
if (CollectionUtils.isNotEmpty(mqs)) {
Map<MessageQueue, Long> offsetTable = new HashMap<>(mqs.size(), 1);
for (MessageQueue mq : mqs) {
long offset = searchOffset(mq, timeStamp);
offsetTable.put(mq, offset);
}
this.mQClientFactory.resetOffset(topic, groupName(), offsetTable);
}
}
}
public long searchOffset(MessageQueue mq, long timestamp) throws MQClientException {
return this.mQClientFactory.getMQAdminImpl().searchOffset(mq, timestamp);
}
@Override
public String groupName() {
return this.defaultMQPushConsumer.getConsumerGroup();
}
@Override
public MessageModel messageModel() {
return this.defaultMQPushConsumer.getMessageModel();
}
@Override
public ConsumeType consumeType() {
return ConsumeType.CONSUME_PASSIVELY;
}
@Override
public ConsumeFromWhere consumeFromWhere() {
return this.defaultMQPushConsumer.getConsumeFromWhere();
}
@Override
public Set<SubscriptionData> subscriptions() {
return new HashSet<>(this.rebalanceImpl.getSubscriptionInner().values());
}
@Override
public void doRebalance() {
if (!this.pause) {
this.rebalanceImpl.doRebalance(this.isConsumeOrderly());
}
}
@Override
public boolean tryRebalance() {
if (!this.pause) {
return this.rebalanceImpl.doRebalance(this.isConsumeOrderly());
}
return false;
}
@Override
public void persistConsumerOffset() {
try {
this.makeSureStateOK();
Set<MessageQueue> mqs = new HashSet<>();
Set<MessageQueue> allocateMq = this.rebalanceImpl.getProcessQueueTable().keySet();
mqs.addAll(allocateMq);
this.offsetStore.persistAll(mqs);
} catch (Exception e) {
log.error("group: " + this.defaultMQPushConsumer.getConsumerGroup() + " persistConsumerOffset exception", e);
}
}
@Override
public void updateTopicSubscribeInfo(String topic, Set<MessageQueue> info) {
Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
if (subTable != null) {
if (subTable.containsKey(topic)) {
this.rebalanceImpl.topicSubscribeInfoTable.put(topic, info);
}
}
}
@Override
public boolean isSubscribeTopicNeedUpdate(String topic) {
Map<String, SubscriptionData> subTable = this.getSubscriptionInner();
if (subTable != null) {
if (subTable.containsKey(topic)) {
return !this.rebalanceImpl.topicSubscribeInfoTable.containsKey(topic);
}
}
return false;
}
@Override
public boolean isUnitMode() {
return this.defaultMQPushConsumer.isUnitMode();
}
@Override
public ConsumerRunningInfo consumerRunningInfo() {
ConsumerRunningInfo info = new ConsumerRunningInfo();
Properties prop = MixAll.object2Properties(this.defaultMQPushConsumer);
prop.put(ConsumerRunningInfo.PROP_CONSUME_ORDERLY, String.valueOf(this.consumeOrderly));
prop.put(ConsumerRunningInfo.PROP_THREADPOOL_CORE_SIZE, String.valueOf(this.consumeMessageService.getCorePoolSize()));
prop.put(ConsumerRunningInfo.PROP_CONSUMER_START_TIMESTAMP, String.valueOf(this.consumerStartTimestamp));
info.setProperties(prop);
Set<SubscriptionData> subSet = this.subscriptions();
info.getSubscriptionSet().addAll(subSet);
Iterator<Entry<MessageQueue, ProcessQueue>> it = this.rebalanceImpl.getProcessQueueTable().entrySet().iterator();
while (it.hasNext()) {
Entry<MessageQueue, ProcessQueue> next = it.next();
MessageQueue mq = next.getKey();
ProcessQueue pq = next.getValue();
ProcessQueueInfo pqinfo = new ProcessQueueInfo();
pqinfo.setCommitOffset(this.offsetStore.readOffset(mq, ReadOffsetType.MEMORY_FIRST_THEN_STORE));
pq.fillProcessQueueInfo(pqinfo);
info.getMqTable().put(mq, pqinfo);
}
Iterator<Entry<MessageQueue, PopProcessQueue>> popIt = this.rebalanceImpl.getPopProcessQueueTable().entrySet().iterator();
while (popIt.hasNext()) {
Entry<MessageQueue, PopProcessQueue> next = popIt.next();
MessageQueue mq = next.getKey();
PopProcessQueue pq = next.getValue();
PopProcessQueueInfo pqinfo = new PopProcessQueueInfo();
pq.fillPopProcessQueueInfo(pqinfo);
info.getMqPopTable().put(mq, pqinfo);
}
for (SubscriptionData sd : subSet) {
ConsumeStatus consumeStatus = this.mQClientFactory.getConsumerStatsManager().consumeStatus(this.groupName(), sd.getTopic());
info.getStatusTable().put(sd.getTopic(), consumeStatus);
}
return info;
}
public MQClientInstance getmQClientFactory() {
return mQClientFactory;
}
public void setmQClientFactory(MQClientInstance mQClientFactory) {
this.mQClientFactory = mQClientFactory;
}
public ServiceState getServiceState() {
return serviceState;
}
//Don't use this deprecated setter, which will be removed soon.
@Deprecated
public synchronized void setServiceState(ServiceState serviceState) {
this.serviceState = serviceState;
}
public void adjustThreadPool() {
long computeAccTotal = this.computeAccumulationTotal();
long adjustThreadPoolNumsThreshold = this.defaultMQPushConsumer.getAdjustThreadPoolNumsThreshold();
        long incThreshold = (long) (adjustThreadPoolNumsThreshold * 1.0); // grow the pool once accumulation reaches the configured threshold
        long decThreshold = (long) (adjustThreadPoolNumsThreshold * 0.8); // shrink it once accumulation drops below 80% of the threshold
if (computeAccTotal >= incThreshold) {
this.consumeMessageService.incCorePoolSize();
}
if (computeAccTotal < decThreshold) {
this.consumeMessageService.decCorePoolSize();
}
}
private long computeAccumulationTotal() {
long msgAccTotal = 0;
ConcurrentMap<MessageQueue, ProcessQueue> processQueueTable = this.rebalanceImpl.getProcessQueueTable();
Iterator<Entry<MessageQueue, ProcessQueue>> it = processQueueTable.entrySet().iterator();
while (it.hasNext()) {
Entry<MessageQueue, ProcessQueue> next = it.next();
ProcessQueue value = next.getValue();
msgAccTotal += value.getMsgAccCnt();
}
return msgAccTotal;
}
public List<QueueTimeSpan> queryConsumeTimeSpan(final String topic)
throws RemotingException, MQClientException, InterruptedException, MQBrokerException {
List<QueueTimeSpan> queueTimeSpan = new ArrayList<>();
TopicRouteData routeData = this.mQClientFactory.getMQClientAPIImpl().getTopicRouteInfoFromNameServer(topic, 3000);
for (BrokerData brokerData : routeData.getBrokerDatas()) {
String addr = brokerData.selectBrokerAddr();
queueTimeSpan.addAll(this.mQClientFactory.getMQClientAPIImpl().queryConsumeTimeSpan(addr, topic, groupName(), 3000));
}
return queueTimeSpan;
}
public void tryResetPopRetryTopic(final List<MessageExt> msgs, String consumerGroup) {
String popRetryPrefix = MixAll.RETRY_GROUP_TOPIC_PREFIX + consumerGroup + "_";
for (MessageExt msg : msgs) {
if (msg.getTopic().startsWith(popRetryPrefix)) {
String normalTopic = KeyBuilder.parseNormalTopic(msg.getTopic(), consumerGroup);
if (normalTopic != null && !normalTopic.isEmpty()) {
msg.setTopic(normalTopic);
}
}
}
}
public void resetRetryAndNamespace(final List<MessageExt> msgs, String consumerGroup) {
final String groupTopic = MixAll.getRetryTopic(consumerGroup);
for (MessageExt msg : msgs) {
String retryTopic = msg.getProperty(MessageConst.PROPERTY_RETRY_TOPIC);
if (retryTopic != null && groupTopic.equals(msg.getTopic())) {
msg.setTopic(retryTopic);
}
if (StringUtils.isNotEmpty(this.defaultMQPushConsumer.getNamespace())) {
msg.setTopic(NamespaceUtil.withoutNamespace(msg.getTopic(), this.defaultMQPushConsumer.getNamespace()));
}
}
}
public ConsumeMessageService getConsumeMessageService() {
return consumeMessageService;
}
public void setConsumeMessageService(ConsumeMessageService consumeMessageService) {
this.consumeMessageService = consumeMessageService;
}
public void setPullTimeDelayMillsWhenException(long pullTimeDelayMillsWhenException) {
this.pullTimeDelayMillsWhenException = pullTimeDelayMillsWhenException;
}
int[] getPopDelayLevel() {
return popDelayLevel;
}
public MessageQueueListener getMessageQueueListener() {
if (null == defaultMQPushConsumer) {
return null;
}
return defaultMQPushConsumer.getMessageQueueListener();
}
}
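
A minimal usage sketch for the push consumer implemented above, assuming the public DefaultMQPushConsumer API that delegates to this impl class; the group name, name-server address, and topic are hypothetical placeholders:

import org.apache.rocketmq.client.consumer.DefaultMQPushConsumer;
import org.apache.rocketmq.client.consumer.listener.ConsumeConcurrentlyStatus;
import org.apache.rocketmq.client.consumer.listener.MessageListenerConcurrently;

public class ConsumerQuickstart {
    public static void main(String[] args) throws Exception {
        DefaultMQPushConsumer consumer = new DefaultMQPushConsumer("example_group"); // hypothetical group
        consumer.setNamesrvAddr("localhost:9876"); // hypothetical name server address
        consumer.subscribe("ExampleTopic", "*"); // delegates to the subscribe(...) shown above
        consumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
            msgs.forEach(msg -> System.out.println(new String(msg.getBody())));
            return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
        });
        consumer.start(); // drives the start() state machine shown above
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::shutdown));
    }
}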
|
filter
|
java
|
playframework__playframework
|
documentation/manual/working/javaGuide/main/http/code/javaguide/http/JavaResponse.java
|
{
"start": 1089,
"end": 10521
}
|
class ____ extends WithApplication {
@Test
public void textContentType() {
// #text-content-type
Result textResult = ok("Hello World!");
// #text-content-type
assertThat(textResult.contentType())
.hasValueSatisfying(__ -> assertThat(__).contains("text/plain"));
}
@Test
public void jsonContentType() {
String object = "";
// #json-content-type
JsonNode json = Json.toJson(object);
Result jsonResult = ok(json);
// #json-content-type
assertThat(jsonResult.contentType())
.hasValueSatisfying(__ -> assertThat(__).contains("application/json"));
}
@Test
public void customContentType() {
// #custom-content-type
Result htmlResult = ok("<h1>Hello World!</h1>").as("text/html");
// #custom-content-type
assertThat(htmlResult.contentType())
.hasValueSatisfying(__ -> assertThat(__).contains("text/html"));
}
@Test
public void customDefiningContentType() {
// #content-type_defined_html
Result htmlResult = ok("<h1>Hello World!</h1>").as(MimeTypes.HTML);
// #content-type_defined_html
assertThat(htmlResult.contentType())
.hasValueSatisfying(__ -> assertThat(__).contains("text/html"));
}
@Test
public void responseHeaders() {
Map<String, String> headers =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #response-headers
public Result index() {
return ok("<h1>Hello World!</h1>")
.as(MimeTypes.HTML)
.withHeader(CACHE_CONTROL, "max-age=3600")
.withHeader(ETAG, "some-etag-calculated-value");
}
// #response-headers
},
fakeRequest(),
mat)
.headers();
assertThat(headers.get(CACHE_CONTROL)).isEqualTo("max-age=3600");
assertThat(headers.get(ETAG)).isEqualTo("some-etag-calculated-value");
}
@Test
public void setCookie() {
Http.Cookies cookies =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #set-cookie
public Result index() {
return ok("<h1>Hello World!</h1>")
.as(MimeTypes.HTML)
.withCookies(Cookie.builder("theme", "blue").build());
}
// #set-cookie
},
fakeRequest(),
mat)
.cookies();
Optional<Cookie> cookie = cookies.get("theme");
assertThat(cookie)
.isPresent()
.hasValueSatisfying(__ -> assertThat(__.value()).isEqualTo("blue"));
}
@Test
public void detailedSetCookie() {
Http.Cookies cookies =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #detailed-set-cookie
public Result index() {
return ok("<h1>Hello World!</h1>")
.as(MimeTypes.HTML)
.withCookies(
Cookie.builder("theme", "blue")
.withMaxAge(Duration.ofSeconds(3600))
.withPath("/some/path")
.withDomain(".example.com")
.withSecure(false)
.withHttpOnly(true)
.withSameSite(Cookie.SameSite.STRICT)
.withPartitioned(false)
.build());
}
// #detailed-set-cookie
},
fakeRequest(),
mat)
.cookies();
Optional<Cookie> cookieOpt = cookies.get("theme");
assertThat(cookieOpt)
.isPresent()
.hasValueSatisfying(
cookie -> {
assertThat(cookie.name()).isEqualTo("theme");
assertThat(cookie.value()).isEqualTo("blue");
assertThat(cookie.maxAge()).isEqualTo(3600);
assertThat(cookie.path()).isEqualTo("/some/path");
assertThat(cookie.domain()).isEqualTo(".example.com");
assertThat(cookie.secure()).isFalse();
assertThat(cookie.httpOnly()).isTrue();
assertThat(cookie.sameSite()).hasValue(Cookie.SameSite.STRICT);
assertThat(cookie.partitioned()).isFalse();
});
}
@Test
public void discardCookie() {
Http.Cookies cookies =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #discard-cookie
public Result index() {
return ok("<h1>Hello World!</h1>").as(MimeTypes.HTML).discardingCookie("theme");
}
// #discard-cookie
},
fakeRequest(),
mat)
.cookies();
Optional<Cookie> cookie = cookies.get("theme");
assertThat(cookie)
.isPresent()
.hasValueSatisfying(
c -> {
assertThat(c.name()).isEqualTo("theme");
assertThat(c.value()).isEmpty();
});
}
@Test
public void charset() {
assertThat(
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #charset
public Result index() {
return ok("<h1>Hello World!</h1>", "iso-8859-1")
.as("text/html; charset=iso-8859-1");
}
// #charset
},
fakeRequest(),
mat)
.charset())
.hasValue("iso-8859-1");
}
@Test
public void rangeResultInputStream() {
Result result =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #range-result-input-stream
public Result index(Http.Request request) {
String content = "This is the full content!";
InputStream input = getInputStream(content);
return RangeResults.ofStream(request, input, content.length());
}
// #range-result-input-stream
private InputStream getInputStream(String content) {
return new ByteArrayInputStream(content.getBytes());
}
},
fakeRequest().header(RANGE, "bytes=0-3"),
mat);
assertThat(result.status()).isEqualTo(PARTIAL_CONTENT);
assertThat(Helpers.contentAsString(result, mat)).isEqualTo("This");
}
@Test
public void rangeResultSource() {
Result result =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #range-result-source
public Result index(Http.Request request) {
String content = "This is the full content!";
Source<ByteString, NotUsed> source = sourceFrom(content);
return RangeResults.ofSource(
request, (long) content.length(), source, "file.txt", MimeTypes.TEXT);
}
// #range-result-source
private Source<ByteString, NotUsed> sourceFrom(String content) {
List<ByteString> byteStrings =
content
.chars()
.boxed()
.map(c -> ByteString.fromArray(new byte[] {c.byteValue()}))
.collect(Collectors.toList());
return org.apache.pekko.stream.javadsl.Source.from(byteStrings);
}
},
fakeRequest().header(RANGE, "bytes=0-3"),
mat);
assertThat(result.status()).isEqualTo(PARTIAL_CONTENT);
assertThat(Helpers.contentAsString(result, mat)).isEqualTo("This");
}
@Test
public void rangeResultSourceOffset() {
Result result =
call(
new MockJavaAction(instanceOf(JavaHandlerComponents.class)) {
// #range-result-source-with-offset
public Result index(Http.Request request) {
String content = "This is the full content!";
return RangeResults.ofSource(
request,
(long) content.length(),
offset ->
new RangeResults.SourceAndOffset(offset, sourceFrom(content).drop(offset)),
"file.txt",
MimeTypes.TEXT);
}
// #range-result-source-with-offset
private Source<ByteString, NotUsed> sourceFrom(String content) {
List<ByteString> byteStrings =
content
.chars()
.boxed()
.map(c -> ByteString.fromArray(new byte[] {c.byteValue()}))
.collect(Collectors.toList());
return org.apache.pekko.stream.javadsl.Source.from(byteStrings);
}
},
fakeRequest().header(RANGE, "bytes=8-10"),
mat);
assertThat(result.status()).isEqualTo(PARTIAL_CONTENT);
assertThat(Helpers.contentAsString(result, mat)).isEqualTo("the");
}
}
|
JavaResponse
|
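The JavaResponse snippets above exercise Play's Java Result API piecewise; a minimal sketch of a complete controller action combining the same helpers, assuming a standard Play Java project (controller and action names are hypothetical):

import play.mvc.Controller;
import play.mvc.Http;
import play.mvc.Result;

public class GreetingController extends Controller {
    // Combines the content-type, header, and cookie helpers shown above.
    public Result greet(Http.Request request) {
        return ok("<h1>Hello World!</h1>")
                .as("text/html")
                .withHeader(Http.HeaderNames.CACHE_CONTROL, "max-age=3600")
                .withCookies(Http.Cookie.builder("theme", "blue").build());
    }
}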
java
|
elastic__elasticsearch
|
x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/MvEvaluatorImplementer.java
|
{
"start": 22331,
"end": 23653
}
|
class ____ {
static SingleValueFunction from(TypeElement declarationType, String name, TypeName resultType, TypeName fieldType) {
if (name.equals("")) {
return null;
}
ExecutableElement fn = findMethod(
declarationType,
new String[] { name },
m -> m.getParameters().size() == 1 && TypeName.get(m.getParameters().get(0).asType()).equals(fieldType)
);
if (fn == null) {
throw new IllegalArgumentException("Couldn't find " + declarationType + "#" + name + "(" + fieldType + ")");
}
return new SingleValueFunction(declarationType, resultType, fn);
}
private final List<Object> invocationArgs = new ArrayList<>();
private SingleValueFunction(TypeElement declarationType, TypeName resultType, ExecutableElement fn) {
invocationArgs.add(resultType);
invocationArgs.add(declarationType);
invocationArgs.add(fn.getSimpleName());
}
private void call(MethodSpec.Builder builder) {
builder.addStatement("$T result = $T.$L(value)", invocationArgs.toArray());
}
}
/**
* Function handling blocks of ascending values.
*/
private
|
SingleValueFunction
|
java
|
google__dagger
|
dagger-compiler/main/java/dagger/internal/codegen/validation/ProvidesMethodValidator.java
|
{
"start": 2345,
"end": 2752
}
|
class ____ extends MethodValidator {
Validator(XMethodElement method) {
super(method);
}
/** Adds an error if a {@link dagger.Provides @Provides} method depends on a producer type. */
@Override
protected void checkParameter(XVariableElement parameter) {
super.checkParameter(parameter);
dependencyRequestValidator.checkNotProducer(report, parameter);
}
}
}
|
Validator
|
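For context, a minimal module that the Validator above would accept, with a note on what checkParameter rejects (module and method names are illustrative):

import dagger.Module;
import dagger.Provides;

@Module
public final class GreetingModule {
    // Accepted: an ordinary @Provides method with a plain dependency.
    @Provides
    static String greeting(Integer count) {
        return "hello x" + count;
    }
    // A parameter typed dagger.producers.Producer<T> or dagger.producers.Produced<T>
    // would be flagged by the checkNotProducer call in checkParameter above.
}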
java
|
mybatis__mybatis-3
|
src/test/java/org/apache/ibatis/submitted/foreach/ForEachTest.java
|
{
"start": 4343,
"end": 8057
}
|
class ____.apache.ibatis.submitted.foreach.User'");
}
}
@Test
void shouldRemoveItemVariableInTheContext() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
int result = mapper.itemVariableConflict(5, Arrays.asList(1, 2), Arrays.asList(3, 4));
Assertions.assertEquals(5, result);
}
}
@Test
void shouldRemoveIndexVariableInTheContext() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
int result = mapper.indexVariableConflict(4, Arrays.asList(6, 7), Arrays.asList(8, 9));
Assertions.assertEquals(4, result);
}
}
@Test
void shouldAllowNullWhenAttributeIsOmitAndConfigurationIsDefault() throws IOException, SQLException {
SqlSessionFactory sqlSessionFactory;
try (Reader reader = Resources.getResourceAsReader("org/apache/ibatis/submitted/foreach/mybatis-config.xml")) {
sqlSessionFactory = new SqlSessionFactoryBuilder().build(reader);
}
BaseDataTest.runScript(sqlSessionFactory.getConfiguration().getEnvironment().getDataSource(),
"org/apache/ibatis/submitted/foreach/CreateDB.sql");
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = new User();
user.setFriendList(null);
mapper.countUserWithNullableIsOmit(user);
Assertions.fail();
} catch (PersistenceException e) {
Assertions.assertEquals("The expression 'friendList' evaluated to a null value.", e.getCause().getMessage());
}
}
@Test
void shouldAllowNullWhenAttributeIsOmitAndConfigurationIsTrue() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
sqlSessionFactory.getConfiguration().setNullableOnForEach(true);
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = new User();
user.setFriendList(null);
int result = mapper.countUserWithNullableIsOmit(user);
Assertions.assertEquals(6, result);
}
}
@Test
void shouldNotAllowNullWhenAttributeIsOmitAndConfigurationIsFalse() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
sqlSessionFactory.getConfiguration().setNullableOnForEach(false);
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = new User();
user.setFriendList(null);
mapper.countUserWithNullableIsOmit(user);
Assertions.fail();
} catch (PersistenceException e) {
Assertions.assertEquals("The expression 'friendList' evaluated to a null value.", e.getCause().getMessage());
}
}
@Test
void shouldAllowNullWhenAttributeIsTrueAndConfigurationIsFalse() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
sqlSessionFactory.getConfiguration().setNullableOnForEach(false);
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = new User();
user.setFriendList(null);
int result = mapper.countUserWithNullableIsTrue(user);
Assertions.assertEquals(6, result);
}
}
@Test
void shouldNotAllowNullWhenAttributeIsFalseAndConfigurationIsTrue() {
try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
sqlSessionFactory.getConfiguration().setNullableOnForEach(true);
Mapper mapper = sqlSession.getMapper(Mapper.class);
User user = new User();
user.setFriendList(null);
mapper.countUserWithNullableIsFalse(user);
Assertions.fail();
} catch (PersistenceException e) {
Assertions.assertEquals("The expression 'friendList' evaluated to a null value.", e.getCause().getMessage());
}
}
}
|
org
|
java
|
spring-projects__spring-security
|
itest/context/src/integration-test/java/org/springframework/security/integration/StubUserRepository.java
|
{
"start": 684,
"end": 784
}
|
class ____ implements UserRepository {
@Override
public void doSomething() {
}
}
|
StubUserRepository
|
java
|
apache__flink
|
flink-tests/src/test/java/org/apache/flink/test/checkpointing/CoStreamCheckpointingITCase.java
|
{
"start": 10502,
"end": 12128
}
|
class ____ extends RichReduceFunction<PrefixCount> {
private static volatile boolean hasFailed = false;
private final long numElements;
private long failurePos;
private long count;
OnceFailingReducer(long numElements) {
this.numElements = numElements;
}
@Override
public void open(OpenContext openContext) {
long failurePosMin =
(long)
(0.4
* numElements
/ getRuntimeContext()
.getTaskInfo()
.getNumberOfParallelSubtasks());
long failurePosMax =
(long)
(0.7
* numElements
/ getRuntimeContext()
.getTaskInfo()
.getNumberOfParallelSubtasks());
            // Math.floorMod keeps the offset non-negative even when nextLong() is negative.
            failurePos =
                    Math.floorMod(new Random().nextLong(), failurePosMax - failurePosMin) + failurePosMin;
count = 0;
}
@Override
public PrefixCount reduce(PrefixCount value1, PrefixCount value2) throws Exception {
count++;
if (!hasFailed && count >= failurePos) {
hasFailed = true;
throw new Exception("Test Failure");
}
value1.count += value2.count;
return value1;
}
}
private static
|
OnceFailingReducer
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/processor/XPathWithNamespaceBuilderFilterAndResultTypeTest.java
|
{
"start": 946,
"end": 1681
}
|
class ____ extends XPathWithNamespaceBuilderFilterTest {
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
public void configure() {
// START SNIPPET: example
// lets define the namespaces we'll need in our filters
Namespaces ns = new Namespaces("c", "http://acme.com/cheese").add("xsd", "http://www.w3.org/2001/XMLSchema");
// now lets create an xpath based Message Filter
from("direct:start").filter(xpath("/c:person[@name='James']", String.class, ns)).to("mock:result");
// END SNIPPET: example
}
};
}
}
|
XPathWithNamespaceBuilderFilterAndResultTypeTest
|
java
|
google__guava
|
android/guava-tests/test/com/google/common/util/concurrent/JdkFutureAdaptersTest.java
|
{
"start": 6980,
"end": 8135
}
|
class ____<V> implements Future<V> {
final CountDownLatch allowGetToComplete = new CountDownLatch(1);
@Override
public boolean cancel(boolean mayInterruptIfRunning) {
throw new AssertionFailedError();
}
@Override
public V get() throws InterruptedException {
/*
* Wait a little to give us time to call addListener before the future's
     * value is set, in addition to the call we'll make afterwards.
*/
allowGetToComplete.await(1, SECONDS);
throw new RuntimeException("expected, should be caught");
}
@Override
public V get(long timeout, TimeUnit unit) {
throw new AssertionFailedError();
}
@Override
public boolean isCancelled() {
throw new AssertionFailedError();
}
@Override
public boolean isDone() {
/*
* If isDone is true during the call to listenInPoolThread,
* listenInPoolThread doesn't start a thread. Make sure it's false the
* first time through (and forever after, since no one else cares about
* it).
*/
return false;
}
}
private static final
|
RuntimeExceptionThrowingFuture
|
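The RuntimeExceptionThrowingFuture helper above exists to exercise JdkFutureAdapters.listenInPoolThread; a minimal sketch of that adapter applied to an ordinary JDK future (executor setup is illustrative):

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.JdkFutureAdapters;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AdapterSketch {
    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> plain = pool.submit(() -> "done");
        // listenInPoolThread wraps a plain Future; a pool thread blocks on get() to fire listeners.
        ListenableFuture<String> listenable = JdkFutureAdapters.listenInPoolThread(plain);
        Futures.addCallback(listenable, new FutureCallback<String>() {
            @Override public void onSuccess(String result) { System.out.println(result); }
            @Override public void onFailure(Throwable t) { t.printStackTrace(); }
        }, MoreExecutors.directExecutor());
        pool.shutdown();
    }
}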
java
|
apache__hadoop
|
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/TestLocalAllocationTagsManager.java
|
{
"start": 1992,
"end": 6317
}
|
class ____ {
private RMContext rmContext;
@BeforeEach
public void setup() {
MockRM rm = new MockRM();
rm.start();
MockNodes.resetHostIds();
List<RMNode> rmNodes =
MockNodes.newNodes(2, 4, Resource.newInstance(4096, 4));
for (RMNode rmNode : rmNodes) {
rm.getRMContext().getRMNodes().putIfAbsent(rmNode.getNodeID(), rmNode);
}
rmContext = rm.getRMContext();
}
@Test
public void testTempContainerAllocations()
throws InvalidAllocationTagsQueryException {
/**
* Construct both TEMP and normal containers: Node1: TEMP container_1_1
* (mapper/reducer/app_1) container_1_2 (service/app_1)
*
* Node2: container_1_3 (reducer/app_1) TEMP container_2_1 (service/app_2)
*/
AllocationTagsManager atm = new AllocationTagsManager(rmContext);
LocalAllocationTagsManager ephAtm =
new LocalAllocationTagsManager(atm);
// 3 Containers from app1
ephAtm.addTempTags(NodeId.fromString("host1:123"),
TestUtils.getMockApplicationId(1),
ImmutableSet.of("mapper", "reducer"));
atm.addContainer(NodeId.fromString("host1:123"),
TestUtils.getMockContainerId(1, 2), ImmutableSet.of("service"));
atm.addContainer(NodeId.fromString("host2:123"),
TestUtils.getMockContainerId(1, 3), ImmutableSet.of("reducer"));
// 1 Container from app2
ephAtm.addTempTags(NodeId.fromString("host2:123"),
TestUtils.getMockApplicationId(2), ImmutableSet.of("service"));
// Expect tag mappings to be present including temp Tags
assertEquals(1,
atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(1),
ImmutableSet.of("mapper")),
Long::sum));
assertEquals(1,
atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(1),
ImmutableSet.of("service")),
Long::sum));
assertEquals(1,
atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(2),
ImmutableSet.of("service")),
Long::sum));
// Do a temp Tag cleanup on app2
ephAtm.cleanTempContainers(TestUtils.getMockApplicationId(2));
assertEquals(0,
atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(2),
ImmutableSet.of("service")),
Long::sum));
// Expect app1 to be unaffected
assertEquals(1,
atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(1),
ImmutableSet.of("mapper")),
Long::sum));
// Do a cleanup on app1 as well
ephAtm.cleanTempContainers(TestUtils.getMockApplicationId(1));
assertEquals(0,
atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(1),
ImmutableSet.of("mapper")),
Long::sum));
// Non temp-tags should be unaffected
assertEquals(1,
atm.getNodeCardinalityByOp(NodeId.fromString("host1:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(1),
ImmutableSet.of("service")),
Long::sum));
assertEquals(0,
atm.getNodeCardinalityByOp(NodeId.fromString("host2:123"),
AllocationTags.createSingleAppAllocationTags(
TestUtils.getMockApplicationId(2),
ImmutableSet.of("service")),
Long::sum));
// Expect app2 with no containers, and app1 with 2 containers across 2 nodes
assertEquals(2,
atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(1))
.getTypeToTagsWithCount().size());
assertNull(
atm.getPerAppNodeMappings().get(TestUtils.getMockApplicationId(2)));
}
}
|
TestLocalAllocationTagsManager
|
java
|
spring-projects__spring-security
|
core/src/test/java/org/springframework/security/authentication/anonymous/AnonymousAuthenticationProviderTests.java
|
{
"start": 1474,
"end": 3294
}
|
class ____ {
@Test
public void testDetectsAnInvalidKey() {
AnonymousAuthenticationProvider aap = new AnonymousAuthenticationProvider("qwerty");
AnonymousAuthenticationToken token = new AnonymousAuthenticationToken("WRONG_KEY", "Test",
AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"));
assertThatExceptionOfType(BadCredentialsException.class).isThrownBy(() -> aap.authenticate(token));
}
@Test
public void testDetectsMissingKey() {
assertThatIllegalArgumentException().isThrownBy(() -> new AnonymousAuthenticationProvider(null));
}
@Test
public void testGettersSetters() {
AnonymousAuthenticationProvider aap = new AnonymousAuthenticationProvider("qwerty");
assertThat(aap.getKey()).isEqualTo("qwerty");
}
@Test
public void testIgnoresClassesItDoesNotSupport() {
AnonymousAuthenticationProvider aap = new AnonymousAuthenticationProvider("qwerty");
TestingAuthenticationToken token = new TestingAuthenticationToken("user", "password", "ROLE_A");
assertThat(aap.supports(TestingAuthenticationToken.class)).isFalse();
// Try it anyway
assertThat(aap.authenticate(token)).isNull();
}
@Test
public void testNormalOperation() {
AnonymousAuthenticationProvider aap = new AnonymousAuthenticationProvider("qwerty");
AnonymousAuthenticationToken token = new AnonymousAuthenticationToken("qwerty", "Test",
AuthorityUtils.createAuthorityList("ROLE_ONE", "ROLE_TWO"));
Authentication result = aap.authenticate(token);
assertThat(token).isEqualTo(result);
}
@Test
public void testSupports() {
AnonymousAuthenticationProvider aap = new AnonymousAuthenticationProvider("qwerty");
assertThat(aap.supports(AnonymousAuthenticationToken.class)).isTrue();
assertThat(aap.supports(TestingAuthenticationToken.class)).isFalse();
}
}
|
AnonymousAuthenticationProviderTests
|
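Beyond the unit tests above, a minimal sketch of wiring the provider into a ProviderManager (key, principal, and role values are illustrative):

import org.springframework.security.authentication.AnonymousAuthenticationProvider;
import org.springframework.security.authentication.AnonymousAuthenticationToken;
import org.springframework.security.authentication.ProviderManager;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.authority.AuthorityUtils;

public class AnonymousAuthSketch {
    public static void main(String[] args) {
        AnonymousAuthenticationProvider provider = new AnonymousAuthenticationProvider("qwerty");
        ProviderManager manager = new ProviderManager(provider);
        Authentication token = new AnonymousAuthenticationToken("qwerty", "anonymousUser",
                AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS"));
        // Succeeds because the token carries the same key the provider was constructed with.
        Authentication result = manager.authenticate(token);
        System.out.println(result.isAuthenticated());
    }
}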
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/BoxedPrimitiveConstructorTest.java
|
{
"start": 4849,
"end": 5156
}
|
class ____ {
{
String s = new String((String) null);
}
}
""")
.doTest();
}
@Test
public void autoboxing() {
compilationHelper
.addSourceLines(
"Test.java",
"""
public abstract
|
Test
|
java
|
dropwizard__dropwizard
|
dropwizard-hibernate/src/test/java/io/dropwizard/hibernate/LazyLoadingTest.java
|
{
"start": 6620,
"end": 7134
}
|
class ____ extends AbstractDAO<Dog> {
DogDAO(SessionFactory sessionFactory) {
super(sessionFactory);
}
Optional<Dog> findByName(String name) {
return Optional.ofNullable(get(name));
}
void create(Dog dog) throws HibernateException {
currentSession().setHibernateFlushMode(FlushMode.COMMIT);
currentSession().save(dog);
}
}
@Path("/dogs/{name}")
@Produces(MediaType.APPLICATION_JSON)
public static
|
DogDAO
|
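A minimal sketch of a resource method that would use the DAO above inside a Hibernate session, assuming Dropwizard's @UnitOfWork support (resource wiring is illustrative; the javax vs jakarta namespace depends on the Dropwizard version):

import io.dropwizard.hibernate.UnitOfWork;
import javax.ws.rs.GET;
import javax.ws.rs.NotFoundException;
import javax.ws.rs.PathParam;

public class DogResource {
    private final DogDAO dogDAO;

    DogResource(DogDAO dogDAO) {
        this.dogDAO = dogDAO;
    }

    // @UnitOfWork opens a session/transaction around the call, so lazy loading works here.
    @GET
    @UnitOfWork
    public Dog find(@PathParam("name") String name) {
        return dogDAO.findByName(name).orElseThrow(NotFoundException::new);
    }
}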
java
|
elastic__elasticsearch
|
x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java
|
{
"start": 941,
"end": 4376
}
|
class ____ {
    // There are eight 15-minute buckets in a two-hour span, so we use that number as the fallback for very long buckets
private static final int DEFAULT_NUMBER_OF_BUCKETS_TO_SPAN = 8;
private static final long DEFAULT_CHECK_WINDOW_MS = 7_200_000L; // 2 hours in Milliseconds
/**
* This will build the appropriate detector given the parameters.
*
     * If the {@link DatafeedConfig#getDelayedDataCheckConfig()} is not enabled, a {@link NullDelayedDataDetector} is returned, which
     * performs no checks and only supplies an empty collection.
*
* @param job The {@link Job} object for the given `datafeedConfig`
* @param datafeedConfig The {@link DatafeedConfig} for which to create the {@link DelayedDataDetector}
* @param client The {@link Client} capable of taking action against the ES Cluster.
* @param xContentRegistry The current NamedXContentRegistry with which to parse the query
* @return A new {@link DelayedDataDetector}
*/
public static DelayedDataDetector buildDetector(
Job job,
DatafeedConfig datafeedConfig,
Client client,
NamedXContentRegistry xContentRegistry
) {
if (datafeedConfig.getDelayedDataCheckConfig().isEnabled()) {
long window = validateAndCalculateWindowLength(
job.getAnalysisConfig().getBucketSpan(),
datafeedConfig.getDelayedDataCheckConfig().getCheckWindow()
);
long bucketSpan = job.getAnalysisConfig().getBucketSpan() == null ? 0 : job.getAnalysisConfig().getBucketSpan().millis();
return new DatafeedDelayedDataDetector(
bucketSpan,
window,
job.getId(),
job.getDataDescription().getTimeField(),
datafeedConfig.getParsedQuery(xContentRegistry),
datafeedConfig.getIndices().toArray(new String[0]),
datafeedConfig.getIndicesOptions(),
datafeedConfig.getRuntimeMappings(),
client
);
} else {
return new NullDelayedDataDetector();
}
}
private static long validateAndCalculateWindowLength(TimeValue bucketSpan, TimeValue currentWindow) {
if (bucketSpan == null) {
return 0;
}
if (currentWindow == null) { // we should provide a good default as the user did not specify a window
return Math.max(DEFAULT_CHECK_WINDOW_MS, DEFAULT_NUMBER_OF_BUCKETS_TO_SPAN * bucketSpan.millis());
}
if (currentWindow.compareTo(bucketSpan) < 0) {
throw new IllegalArgumentException(
Messages.getMessage(
Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL,
currentWindow.getStringRep(),
bucketSpan.getStringRep()
)
);
} else if (currentWindow.millis() > bucketSpan.millis() * DelayedDataCheckConfig.MAX_NUMBER_SPANABLE_BUCKETS) {
throw new IllegalArgumentException(
Messages.getMessage(
Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS,
currentWindow.getStringRep(),
bucketSpan.getStringRep()
)
);
}
return currentWindow.millis();
}
public static
|
DelayedDataDetectorFactory
|
java
|
google__error-prone
|
core/src/test/java/com/google/errorprone/bugpatterns/CharacterGetNumericValueTest.java
|
{
"start": 3589,
"end": 4011
}
|
class ____ {
void f() {
// BUG: Diagnostic contains: CharacterGetNumericValue
Character.getNumericValue(41);
}
}
""")
.doTest();
}
@Test
public void uCharacter_getNumericValue_char() {
helper
.addSourceLines(
"Test.java",
"""
import com.ibm.icu.lang.UCharacter;
|
Test
|
java
|
google__dagger
|
javatests/dagger/internal/codegen/ComponentCreatorTest.java
|
{
"start": 26576,
"end": 27020
}
|
interface ____"));
});
}
@Test
public void testMultipleSettersPerTypeIncludingResolvedGenericsFails() {
assume().that(compilerType).isEqualTo(JAVAC);
Source moduleFile =
CompilerTests.javaSource(
"test.TestModule",
"package test;",
"",
"import dagger.Module;",
"import dagger.Provides;",
"",
"@Module",
"final
|
Builder
|
java
|
apache__flink
|
flink-datastream-api/src/main/java/org/apache/flink/datastream/api/stream/NonKeyedPartitionStream.java
|
{
"start": 4502,
"end": 4680
}
|
interface ____ a combination of two {@link NonKeyedPartitionStream}. It will be
 * used as the return value of an operation with two outputs.
*/
@Experimental
|
represents
|
java
|
apache__camel
|
core/camel-core/src/test/java/org/apache/camel/util/DumpModelAsXmlChoiceFilterRoutePropertyPlaceholderTest.java
|
{
"start": 1177,
"end": 3447
}
|
class ____ extends ContextTestSupport {
@Test
public void testDumpModelAsXml() throws Exception {
String xml = PluginHelper.getModelToXMLDumper(context).dumpModelAsXml(context, context.getRouteDefinition("myRoute"));
assertNotNull(xml);
log.info(xml);
assertTrue(xml.contains("<header>{{duke}}</header>"));
assertTrue(xml.contains("<header>{{best}}</header>"));
assertTrue(xml.contains("<header>{{extra}}</header>"));
assertTrue(xml.contains("<simple>${body} contains 'Camel'</simple>"));
}
@Test
    public void testDumpModelAsXmlA() throws Exception {
String xml = PluginHelper.getModelToXMLDumper(context).dumpModelAsXml(context, context.getRouteDefinition("a"));
assertNotNull(xml);
log.info(xml);
assertTrue(xml.contains("message=\"{{mypath}}\""));
assertTrue(xml.contains("<constant>bar</constant>"));
assertTrue(xml.contains("<expressionDefinition>header{test} is not null</expressionDefinition>"));
}
@Override
protected RouteBuilder createRouteBuilder() {
return new RouteBuilder() {
@Override
public void configure() {
Properties prop = new Properties();
prop.put("duke", "dude");
prop.put("best", "gold");
prop.put("extra", "extra-gold");
prop.put("mypath", "xpath");
context.getPropertiesComponent().setInitialProperties(prop);
from("direct:start").routeId("myRoute").to("log:input").transform().header("{{duke}}").choice().when()
.header("{{best}}").to("mock:gold").filter()
.header("{{extra}}").to("mock:extra-gold").endChoice().when().simple("${body} contains 'Camel'")
.to("mock:camel").otherwise().to("mock:other").end()
.to("mock:result");
from("seda:a").routeId("a").setProperty("foo").constant("bar").choice().when(header("test").isNotNull())
.log("not null").when(xpath("/foo/bar")).log("{{mypath}}")
.end().to("mock:a");
}
};
}
}
|
DumpModelAsXmlChoiceFilterRoutePropertyPlaceholderTest
|
java
|
google__dagger
|
hilt-android/main/java/dagger/hilt/android/internal/legacy/AggregatedElementProxy.java
|
{
"start": 1033,
"end": 1160
}
|
interface ____ {
/** A reference to the legacy package-private aggregating class. */
Class<?> value();
}
|
AggregatedElementProxy
|
java
|
spring-projects__spring-boot
|
module/spring-boot-cassandra/src/main/java/org/springframework/boot/cassandra/health/CassandraDriverReactiveHealthIndicator.java
|
{
"start": 1464,
"end": 2411
}
|
class ____ extends AbstractReactiveHealthIndicator {
private final CqlSession session;
/**
* Create a new {@link CassandraDriverReactiveHealthIndicator} instance.
* @param session the {@link CqlSession}.
*/
public CassandraDriverReactiveHealthIndicator(CqlSession session) {
super("Cassandra health check failed");
Assert.notNull(session, "'session' must not be null");
this.session = session;
}
@Override
protected Mono<Health> doHealthCheck(Health.Builder builder) {
return Mono.fromSupplier(() -> {
Collection<Node> nodes = this.session.getMetadata().getNodes().values();
Optional<Node> nodeUp = nodes.stream().filter((node) -> node.getState() == NodeState.UP).findAny();
builder.status(nodeUp.isPresent() ? Status.UP : Status.DOWN);
nodeUp.map(Node::getCassandraVersion).ifPresent((version) -> builder.withDetail("version", version));
return builder.build();
});
}
}
|
CassandraDriverReactiveHealthIndicator
|
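A minimal sketch of exposing the indicator above as a Spring bean, assuming a CqlSession is already available in the application context and the package shown in the path above (configuration class name is hypothetical):

import com.datastax.oss.driver.api.core.CqlSession;
import org.springframework.boot.cassandra.health.CassandraDriverReactiveHealthIndicator;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class CassandraHealthConfig {
    // The actuator discovers this bean as a ReactiveHealthIndicator.
    @Bean
    public CassandraDriverReactiveHealthIndicator cassandraHealthIndicator(CqlSession session) {
        return new CassandraDriverReactiveHealthIndicator(session);
    }
}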