id
stringlengths 29
30
| content
stringlengths 152
2.6k
|
|---|---|
codereview_new_java_data_7298
|
public static long[] andAckSet(long[] firstAckSet, long[] secondAckSet) {
public static boolean isAckSetEmpty(long[] ackSet) {
BitSetRecyclable bitSet = BitSetRecyclable.create().resetWords(ackSet);
return bitSet.isEmpty();
}
please recycle this bitSet or use `BitSetRecyclable.valueOf()`
public static long[] andAckSet(long[] firstAckSet, long[] secondAckSet) {
public static boolean isAckSetEmpty(long[] ackSet) {
BitSetRecyclable bitSet = BitSetRecyclable.create().resetWords(ackSet);
+ bitSet.recycle();
return bitSet.isEmpty();
}
|
codereview_new_java_data_7299
|
private Pair<Boolean, String> getMaxUsageBroker(
return Pair.of(hasBrokerBelowLowerBound, maxUsageBrokerName);
}
@Override
- public void onBrokerChange(Set<String> newBrokers) {
synchronized (activeBrokers) {
activeBrokers.clear();
activeBrokers.addAll(newBrokers);
Why not update the `brokerAvgResourceUsage` without maintaining a new `activeBrokers` map?
private Pair<Boolean, String> getMaxUsageBroker(
return Pair.of(hasBrokerBelowLowerBound, maxUsageBrokerName);
}
@Override
+ public void onActiveBrokersChange(Set<String> newBrokers) {
synchronized (activeBrokers) {
activeBrokers.clear();
activeBrokers.addAll(newBrokers);
|
codereview_new_java_data_7300
|
public interface LoadSheddingStrategy {
*
* @param activeBrokers active Brokers
*/
- default void onBrokerChange(Set<String> activeBrokers) {}
}
```suggestion
default void onActiveBrokersChange(Set<String> activeBrokers) {}
```
public interface LoadSheddingStrategy {
*
* @param activeBrokers active Brokers
*/
+ default void onActiveBrokersChange(Set<String> activeBrokers) {}
}
|
codereview_new_java_data_7301
|
public String printResourceUsage() {
cpu.percentUsage(), memory.percentUsage(), directMemory.percentUsage(), bandwidthIn.percentUsage(),
bandwidthOut.percentUsage());
}
- @Deprecated
public double getMaxResourceUsageWithWeight(final double cpuWeight, final double memoryWeight,
final double directMemoryWeight, final double bandwidthInWeight,
final double bandwidthOutWeight) {
Nit: what do you think about adding the `since = "3.0.0"` arg to the `@Deprecated` annotation. I did that in a recent PR because it has the benefit of making it clearer when we can remove it later. The history is clear in git, but this lets a user skip a few steps.
public String printResourceUsage() {
cpu.percentUsage(), memory.percentUsage(), directMemory.percentUsage(), bandwidthIn.percentUsage(),
bandwidthOut.percentUsage());
}
+ @Deprecated(since = "3.0.0")
public double getMaxResourceUsageWithWeight(final double cpuWeight, final double memoryWeight,
final double directMemoryWeight, final double bandwidthInWeight,
final double bandwidthOutWeight) {
|
codereview_new_java_data_7302
|
public class InMemoryRedeliveryTracker implements RedeliveryTracker {
private ConcurrentLongLongPairHashMap trackerCache = ConcurrentLongLongPairHashMap.newBuilder()
- .concurrencyLevel(2)
- .expectedItems(128)
.autoShrink(true)
.build();
Any reason to change the 256,1 to 128,2 ?
public class InMemoryRedeliveryTracker implements RedeliveryTracker {
private ConcurrentLongLongPairHashMap trackerCache = ConcurrentLongLongPairHashMap.newBuilder()
+ .concurrencyLevel(1)
+ .expectedItems(256)
.autoShrink(true)
.build();
|
codereview_new_java_data_7303
|
public void testTopicsDistribution() throws Exception {
log.info("Topics are distributed to consumers as {}", eventListener.getActiveConsumers());
Map<String, Integer> assigned = new HashMap<>();
eventListener.getActiveConsumers().forEach((k, v) -> assigned.compute(v, (t, c) -> c == null ? 1 : ++ c));
- assertEquals(assigned.size(), 10);
for (Consumer<?> consumer : consumerList) {
consumer.close();
}
do you want this to be "consumers" instead of 10 ?
this way the test is clearer
public void testTopicsDistribution() throws Exception {
log.info("Topics are distributed to consumers as {}", eventListener.getActiveConsumers());
Map<String, Integer> assigned = new HashMap<>();
eventListener.getActiveConsumers().forEach((k, v) -> assigned.compute(v, (t, c) -> c == null ? 1 : ++ c));
+ assertEquals(assigned.size(), consumers);
for (Consumer<?> consumer : consumerList) {
consumer.close();
}
|
codereview_new_java_data_7304
|
public LedgerOffloader getManagedLedgerOffloader(NamespaceName namespaceName, Of
});
}
- public boolean isRunning() {
- return this.state == State.Started || this.state == State.Init;
- }
-
public LedgerOffloader createManagedLedgerOffloader(OffloadPoliciesImpl offloadPolicies)
throws PulsarServerException {
try {
Not related
public LedgerOffloader getManagedLedgerOffloader(NamespaceName namespaceName, Of
});
}
public LedgerOffloader createManagedLedgerOffloader(OffloadPoliciesImpl offloadPolicies)
throws PulsarServerException {
try {
|
codereview_new_java_data_7305
|
protected synchronized void initializeBookKeeper(final ManagedLedgerInitializeLe
}
// Calculate total entries and size
- final List<Long> emptyLedgersToBeDeleted = new ArrayList<>();
Iterator<LedgerInfo> iterator = ledgers.values().iterator();
while (iterator.hasNext()) {
LedgerInfo li = iterator.next();
Is there a reason we are not using a thread safe list here? I think we might need one, though I have not read through all of this code.
protected synchronized void initializeBookKeeper(final ManagedLedgerInitializeLe
}
// Calculate total entries and size
+ final List<Long> emptyLedgersToBeDeleted = Collections.synchronizedList(new ArrayList<>());
Iterator<LedgerInfo> iterator = ledgers.values().iterator();
while (iterator.hasNext()) {
LedgerInfo li = iterator.next();
|
codereview_new_java_data_7306
|
public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager {
private final SplitCounter splitCounter = new SplitCounter();
// record load metrics
- private AtomicReference<List<Metrics>> brokerLoadMetrics = new AtomicReference<>();
// record unload metrics
private final AtomicReference<List<Metrics>> unloadMetrics = new AtomicReference();
// record split metrics
- private AtomicReference<List<Metrics>> splitMetrics = new AtomicReference<>();
private final ConcurrentOpenHashMap<String, CompletableFuture<Optional<BrokerLookupData>>>
lookupRequests = ConcurrentOpenHashMap.<String,
Does this field need the final modifier?
public class ExtensibleLoadManagerImpl implements ExtensibleLoadManager {
private final SplitCounter splitCounter = new SplitCounter();
// record load metrics
+ private final AtomicReference<List<Metrics>> brokerLoadMetrics = new AtomicReference<>();
// record unload metrics
private final AtomicReference<List<Metrics>> unloadMetrics = new AtomicReference();
// record split metrics
+ private final AtomicReference<List<Metrics>> splitMetrics = new AtomicReference<>();
private final ConcurrentOpenHashMap<String, CompletableFuture<Optional<BrokerLookupData>>>
lookupRequests = ConcurrentOpenHashMap.<String,
|
codereview_new_java_data_7307
|
public List<Metrics> toMetrics() {
m.put("brk_lb_bundles_split_total", splitCount);
metrics.add(m);
- for (var etr : breakdownCounters.entrySet()) {
var result = etr.getKey();
- for (var counter : etr.getValue().entrySet()) {
var reason = counter.getKey();
var count = counter.getValue();
Map<String, String> breakdownDims = new HashMap<>(dimensions);
Maybe we can use the exact type here, it's a little hard to know the type at first eye.
public List<Metrics> toMetrics() {
m.put("brk_lb_bundles_split_total", splitCount);
metrics.add(m);
+ for (Map.Entry<SplitDecision.Label, Map<SplitDecision.Reason, MutableLong>> etr
+ : breakdownCounters.entrySet()) {
var result = etr.getKey();
+ for (Map.Entry<SplitDecision.Reason, MutableLong> counter : etr.getValue().entrySet()) {
var reason = counter.getKey();
var count = counter.getValue();
Map<String, String> breakdownDims = new HashMap<>(dimensions);
|
codereview_new_java_data_7308
|
private synchronized long recoverBucketSnapshot() throws RuntimeException {
deletionFutures.add(immutableBucket.asyncDeleteBucketSnapshot());
}
- // Wait for all deletion futures to complete before proceeding
- try {
- FutureUtil.waitForAll(deletionFutures).get(AsyncOperationTimeoutSeconds, TimeUnit.SECONDS);
- } catch (InterruptedException | ExecutionException | TimeoutException e) {
- log.warn("asyncDeleteBucketSnapshot calls failed", e);
- if (e instanceof InterruptedException) {
- Thread.currentThread().interrupt();
- }
- }
-
MutableLong numberDelayedMessages = new MutableLong(0);
immutableBucketMap.values().forEach(bucket -> {
numberDelayedMessages.add(bucket.numberBucketDelayedMessages);
We do not need to wait for the snapshot deletion to complete, which means it should not block processes.
private synchronized long recoverBucketSnapshot() throws RuntimeException {
deletionFutures.add(immutableBucket.asyncDeleteBucketSnapshot());
}
MutableLong numberDelayedMessages = new MutableLong(0);
immutableBucketMap.values().forEach(bucket -> {
numberDelayedMessages.add(bucket.numberBucketDelayedMessages);
|
codereview_new_java_data_7309
|
public class ElasticSearchConfig implements Serializable {
@FieldDoc(
required = false,
defaultValue = "30000",
- help = "Idle connection timeout to prevent a connection timeouts."
)
private int connectionIdleTimeoutInMs = 30000;
I wonder if this config should be validated against [bulkFlushIntervalInMs](https://github.com/apache/pulsar/blob/09504017526308fd83f480aa8cb502e1cfe0f633/pulsar-io/elastic-search/src/main/java/org/apache/pulsar/io/elasticsearch/ElasticSearchConfig.java#L189) when bulk API is enabled - something like `connectionIdleTimeoutInMs > 2 * bulkFlushIntervalInMs` because it seems the connection will set idle by design in-between flushes
public class ElasticSearchConfig implements Serializable {
@FieldDoc(
required = false,
defaultValue = "30000",
+ help = "Idle connection timeout to prevent a connection timeout due to inactivity."
)
private int connectionIdleTimeoutInMs = 30000;
|
codereview_new_java_data_7310
|
protected void updateTopicPolicyByNamespacePolicy(Policies namespacePolicies) {
if (log.isDebugEnabled()) {
log.debug("[{}]updateTopicPolicyByNamespacePolicy,data={}", topic, namespacePolicies);
}
- if (namespacePolicies.deleted) {
- return;
- }
topicPolicies.getRetentionPolicies().updateNamespaceValue(namespacePolicies.retention_policies);
topicPolicies.getCompactionThreshold().updateNamespaceValue(namespacePolicies.compaction_threshold);
topicPolicies.getReplicationClusters().updateNamespaceValue(
Remove `namespacePolicies.deleted` logic to ensure the new topic will not be deleted by `PersistenTopic#checkReplication`.
protected void updateTopicPolicyByNamespacePolicy(Policies namespacePolicies) {
if (log.isDebugEnabled()) {
log.debug("[{}]updateTopicPolicyByNamespacePolicy,data={}", topic, namespacePolicies);
}
topicPolicies.getRetentionPolicies().updateNamespaceValue(namespacePolicies.retention_policies);
topicPolicies.getCompactionThreshold().updateNamespaceValue(namespacePolicies.compaction_threshold);
topicPolicies.getReplicationClusters().updateNamespaceValue(
|
codereview_new_java_data_7311
|
public CompletableFuture<Void> initialize() {
Policies policies = optPolicies.get();
- updateTopicPolicyByNamespacePolicy(policies);
initializeDispatchRateLimiterIfNeeded();
nit: unnecessary change for this PR
public CompletableFuture<Void> initialize() {
Policies policies = optPolicies.get();
+ this.updateTopicPolicyByNamespacePolicy(policies);
initializeDispatchRateLimiterIfNeeded();
|
codereview_new_java_data_7312
|
public CompletableFuture<Void> initialize() {
Policies policies = optPolicies.get();
- updateTopicPolicyByNamespacePolicy(policies);
initializeDispatchRateLimiterIfNeeded();
```suggestion
this.updateTopicPolicyByNamespacePolicy(policies);
```
public CompletableFuture<Void> initialize() {
Policies policies = optPolicies.get();
+ this.updateTopicPolicyByNamespacePolicy(policies);
initializeDispatchRateLimiterIfNeeded();
|
codereview_new_java_data_7313
|
public CompletableFuture<Transaction> build() {
return;
}
if (log.isDebugEnabled()) {
- log.debug("Success to new txn. txnID: {}", txnID);
}
TransactionImpl transaction = new TransactionImpl(client, timeUnit.toMillis(txnTimeout),
txnID.getLeastSigBits(), txnID.getMostSigBits());
```suggestion
log.debug("'newTransaction' command completed successfully for transaction: {}", txnID);
```
public CompletableFuture<Transaction> build() {
return;
}
if (log.isDebugEnabled()) {
+ log.debug("'newTransaction' command completed successfully for transaction: {}", txnID);
}
TransactionImpl transaction = new TransactionImpl(client, timeUnit.toMillis(txnTimeout),
txnID.getLeastSigBits(), txnID.getMostSigBits());
|
codereview_new_java_data_7314
|
void deleteNonDurableCursorWithName() throws Exception {
@Test
public void testMessagesConsumedCounterInitializedCorrect() throws Exception {
- ManagedLedger ledger = factory.open("testMessagesConsumedCounterInitializedCorrect",
new ManagedLedgerConfig().setRetentionTime(1, TimeUnit.HOURS).setRetentionSizeInMB(1));
Position position = ledger.addEntry("1".getBytes(Encoding));
NonDurableCursorImpl cursor = (NonDurableCursorImpl) ledger.newNonDurableCursor(PositionImpl.EARLIEST);
cursor.delete(position);
assertEquals(cursor.getMessagesConsumedCounter(), 1);
}
`ledger` should be closed finally
void deleteNonDurableCursorWithName() throws Exception {
@Test
public void testMessagesConsumedCounterInitializedCorrect() throws Exception {
+ ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("testMessagesConsumedCounterInitializedCorrect",
new ManagedLedgerConfig().setRetentionTime(1, TimeUnit.HOURS).setRetentionSizeInMB(1));
Position position = ledger.addEntry("1".getBytes(Encoding));
NonDurableCursorImpl cursor = (NonDurableCursorImpl) ledger.newNonDurableCursor(PositionImpl.EARLIEST);
cursor.delete(position);
assertEquals(cursor.getMessagesConsumedCounter(), 1);
+ assertTrue(cursor.getMessagesConsumedCounter() <= ledger.getEntriesAddedCounter());
+ // cleanup.
+ cursor.close();
+ ledger.close();
}
|
codereview_new_java_data_7315
|
public void testDoNotReplicateSystemTopic() throws Exception {
assertNull(consumerFromR2.receive(5, TimeUnit.SECONDS));
transaction.commit();
- // wait before evaluating stats for the system topics
Thread.sleep(500L);
Assert.assertEquals(admin1.topics().getStats(systemTopic).getReplication().size(), 0);
Assert.assertEquals(admin2.topics().getStats(systemTopic).getReplication().size(), 0);
Assert.assertEquals(admin3.topics().getStats(systemTopic).getReplication().size(), 0);
- Assert.assertEquals(consumerFromR2.receive(5, TimeUnit.SECONDS).getValue(),
- "1".getBytes(StandardCharsets.UTF_8));
cleanup();
setup();
}
what about moving these assertions after consumerR2 has received?
public void testDoNotReplicateSystemTopic() throws Exception {
assertNull(consumerFromR2.receive(5, TimeUnit.SECONDS));
transaction.commit();
+ Assert.assertEquals(consumerFromR2.receive(5, TimeUnit.SECONDS).getValue(),
+ "1".getBytes(StandardCharsets.UTF_8));
+
+ // wait extra 500ms before evaluating stats for the system topics
Thread.sleep(500L);
Assert.assertEquals(admin1.topics().getStats(systemTopic).getReplication().size(), 0);
Assert.assertEquals(admin2.topics().getStats(systemTopic).getReplication().size(), 0);
Assert.assertEquals(admin3.topics().getStats(systemTopic).getReplication().size(), 0);
cleanup();
setup();
}
|
codereview_new_java_data_7316
|
public void testGetWebSocketReadUri(String msgId, String msgIdQueryParam) throws
}
@Test
- public void testPrseMessageId() {
assertEquals(CmdRead.parseMessageId("latest"), MessageId.latest);
assertEquals(CmdRead.parseMessageId("earliest"), MessageId.earliest);
assertEquals(CmdRead.parseMessageId("20:-1"), new MessageIdImpl(20, -1, -1));
maybe typo:
```suggestion
public void testParseMessageId() {
```
public void testGetWebSocketReadUri(String msgId, String msgIdQueryParam) throws
}
@Test
+ public void testParseMessageId() {
assertEquals(CmdRead.parseMessageId("latest"), MessageId.latest);
assertEquals(CmdRead.parseMessageId("earliest"), MessageId.earliest);
assertEquals(CmdRead.parseMessageId("20:-1"), new MessageIdImpl(20, -1, -1));
|
codereview_new_java_data_7317
|
-/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
Need to add `*`
+/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
|
codereview_new_java_data_7318
|
private void recoveredCursor(PositionImpl position, Map<String, Long> properties
}
log.info("[{}] Cursor {} recovered to position {}", ledger.getName(), name, position);
this.cursorProperties = cursorProperties;
markDeletePosition = position;
persistentMarkDeletePosition = position;
inProgressMarkDeletePersistPosition = null;
readPosition = ledger.getNextValidPosition(position);
- messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition()));
ledger.onCursorReadPositionUpdated(this, readPosition);
lastMarkDeleteEntry = new MarkDeleteEntry(markDeletePosition, properties, null, null);
// assign cursor-ledger so, it can be deleted when new ledger will be switched
Why change the order of this line? it doesn't seem to help.
private void recoveredCursor(PositionImpl position, Map<String, Long> properties
}
log.info("[{}] Cursor {} recovered to position {}", ledger.getName(), name, position);
this.cursorProperties = cursorProperties;
+ messagesConsumedCounter = -getNumberOfEntries(Range.openClosed(position, ledger.getLastPosition()));
markDeletePosition = position;
persistentMarkDeletePosition = position;
inProgressMarkDeletePersistPosition = null;
readPosition = ledger.getNextValidPosition(position);
ledger.onCursorReadPositionUpdated(this, readPosition);
lastMarkDeleteEntry = new MarkDeleteEntry(markDeletePosition, properties, null, null);
// assign cursor-ledger so, it can be deleted when new ledger will be switched
|
codereview_new_java_data_7319
|
public void setMetadataStoreProperty() {
originalProperty = System.getProperties().get(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY);
System.setProperty(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY,
MyMetadataStoreProvider.class.getName());
- MetadataStoreFactoryImpl.loadProviders();
}
@AfterClass
public void resetMetadataStoreProperty() {
if (originalProperty != null) {
System.getProperties().put(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY, originalProperty);
}
- MetadataStoreFactoryImpl.loadProviders();
}
It looks unnecessary.
public void setMetadataStoreProperty() {
originalProperty = System.getProperties().get(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY);
System.setProperty(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY,
MyMetadataStoreProvider.class.getName());
}
@AfterClass
public void resetMetadataStoreProperty() {
if (originalProperty != null) {
System.getProperties().put(MetadataStoreFactoryImpl.METADATASTORE_PROVIDERS_PROPERTY, originalProperty);
}
}
|
codereview_new_java_data_7320
|
default long getLowWaterMark() {
* @return {@link TxnMeta} the txnMetas of slow transactions
*/
List<TxnMeta> getSlowTransactions(long timeout);
-
- /**
- * set recover end time.
- * @param time
- */
- void setRecoverEndTime(long time);
}
Sorry, forgot this one.
I think we don't need to add this one to the interface for now? Only the MLTransactionMetadataStore has the recovery stage. We can just check if it's an MLTransactionMetadataStore, then set the recover end time. Maybe it's not good enough for now, we can try to refactor it in the future.
default long getLowWaterMark() {
* @return {@link TxnMeta} the txnMetas of slow transactions
*/
List<TxnMeta> getSlowTransactions(long timeout);
}
|
codereview_new_java_data_7321
|
private static void notify(TopicEventsListener listener,
try {
listener.handleEvent(topic, event, stage, t);
} catch (Throwable ex) {
- log.error("TopicEventsListener exception while handling {}_{} for topic {}", event, stage, topic, ex);
}
}
We can print the `listener` so that if the `TopicEventListener` has implemented the `toString` method, we can know which `TopicEventListener` failed.
private static void notify(TopicEventsListener listener,
try {
listener.handleEvent(topic, event, stage, t);
} catch (Throwable ex) {
+ log.error("TopicEventsListener {} exception while handling {}_{} for topic {}",
+ listener, event, stage, topic, ex);
}
}
|
codereview_new_java_data_7322
|
import org.apache.pulsar.common.util.collections.TripleLongPriorityQueue;
@NotThreadSafe
-public class TripleLongPriorityDelayedIndexQueue implements DelayedIndexQueue {
private final TripleLongPriorityQueue queue;
I notice that this class is used to make `peek` and `pop` easier to use, and it is only used by `MutableBucket`, should it not exist as a separate public class?
import org.apache.pulsar.common.util.collections.TripleLongPriorityQueue;
@NotThreadSafe
+class TripleLongPriorityDelayedIndexQueue implements DelayedIndexQueue {
private final TripleLongPriorityQueue queue;
|
codereview_new_java_data_7323
|
import java.util.Objects;
import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat;
-public interface DelayedIndexQueue {
-
Comparator<DelayedMessageIndexBucketSnapshotFormat.DelayedIndex> COMPARATOR = (o1, o2) -> {
if (!Objects.equals(o1.getTimestamp(), o2.getTimestamp())) {
return Long.compare(o1.getTimestamp(), o2.getTimestamp());
This interface is only used by `MutableBucket` and is not universal. Is this interface not needed?
import java.util.Objects;
import org.apache.pulsar.broker.delayed.proto.DelayedMessageIndexBucketSnapshotFormat;
+interface DelayedIndexQueue {
Comparator<DelayedMessageIndexBucketSnapshotFormat.DelayedIndex> COMPARATOR = (o1, o2) -> {
if (!Objects.equals(o1.getTimestamp(), o2.getTimestamp())) {
return Long.compare(o1.getTimestamp(), o2.getTimestamp());
|
codereview_new_java_data_7324
|
public DelayedIndex pop() {
}
private DelayedIndex getValue(boolean needAdvanceCursor) {
while (segmentListACursor < segmentListA.size()
- && segmentACursor >= segmentListA.get(segmentListACursor).getIndexesCount()) {
segmentListACursor++;
}
while (segmentListBCursor < segmentListB.size()
- && segmentBCursor >= segmentListB.get(segmentListBCursor).getIndexesCount()) {
segmentListBCursor++;
}
It seems a little strange to use `segmentACursor` compare with different segment list sizes, normally, the `segmentACursor` should belong to a specific segment, do these checks are necessary?
public DelayedIndex pop() {
}
private DelayedIndex getValue(boolean needAdvanceCursor) {
+ // skip empty segment
while (segmentListACursor < segmentListA.size()
+ && segmentListA.get(segmentListACursor).getIndexesCount() == 0) {
segmentListACursor++;
}
while (segmentListBCursor < segmentListB.size()
+ && segmentListB.get(segmentListBCursor).getIndexesCount() == 0) {
segmentListBCursor++;
}
|
codereview_new_java_data_7325
|
public synchronized void close() throws PulsarServerException {
}
try {
this.brokerRegistry.close();
- } catch (Exception e) {
- throw new PulsarServerException(e);
}
- this.serviceUnitStateChannel.close();
- this.started = false;
}
private boolean isInternalTopic(String topic) {
Move it to `finally` because when `serviceUnitStateChannel.close()` throws an `PulsarServerException`, it will be skipped.
public synchronized void close() throws PulsarServerException {
}
try {
this.brokerRegistry.close();
+ } finally {
+ try {
+ this.serviceUnitStateChannel.close();
+ } finally {
+ this.started = false;
+ }
}
}
private boolean isInternalTopic(String topic) {
|
codereview_new_java_data_7326
|
void run() throws PulsarAdminException {
}
@Parameters(commandDescription = "Trim a topic")
private class TrimTopic extends CliCommand {
- @Parameter(description = "tenant/namespace", required = true)
private java.util.List<String> params;
@Override
It should be `topic name`?
void run() throws PulsarAdminException {
}
@Parameters(commandDescription = "Trim a topic")
private class TrimTopic extends CliCommand {
+ @Parameter(description = "persistent://tenant/namespace/topic", required = true)
private java.util.List<String> params;
@Override
|
codereview_new_java_data_7327
|
public void getLastMessageId(
}
}
- @PUT
@Path("/{tenant}/{namespace}/{topic}/trim")
@ApiOperation(value = " Trim a topic")
@ApiResponses(value = {
And is it better to change to @POST?
As I understand, the @PUT method usually used to create some resources.
public void getLastMessageId(
}
}
+ @POST
@Path("/{tenant}/{namespace}/{topic}/trim")
@ApiOperation(value = " Trim a topic")
@ApiResponses(value = {
|
codereview_new_java_data_7328
|
public void testFailedUpdatePartitionedTopic() throws Exception {
try {
admin.topics().createSubscription(partitionedTopicName + "-partition-" + startPartitions, subName1,
MessageId.earliest);
} catch (PulsarAdminException.PreconditionFailedException ex) {
// OK
}
if we expect an error we have to fail() if we reach this point
public void testFailedUpdatePartitionedTopic() throws Exception {
try {
admin.topics().createSubscription(partitionedTopicName + "-partition-" + startPartitions, subName1,
MessageId.earliest);
+ fail("Unexpected behaviour");
} catch (PulsarAdminException.PreconditionFailedException ex) {
// OK
}
|
codereview_new_java_data_7329
|
public void createPartitionedTopic(
}
}
- private static final Logger log = LoggerFactory.getLogger(PersistentTopics.class);
}
`PersistentTopics.class` -> `NonPersistentTopics.class` ?
public void createPartitionedTopic(
}
}
+ private static final Logger log = LoggerFactory.getLogger(NonPersistentTopics.class);
}
|
codereview_new_java_data_7330
|
public boolean trackDelayedDelivery(long ledgerId, long entryId, MessageMetadata
}
}
- protected synchronized NavigableSet<PositionImpl> getMessagesToReplayNow(int maxMessagesToRead) {
if (!redeliveryMessages.isEmpty()) {
return redeliveryMessages.getMessagesToReplayNow(maxMessagesToRead);
} else if (delayedDeliveryTracker.isPresent() && delayedDeliveryTracker.get().hasMessageAvailable()) {
We don't need to add a space.
public boolean trackDelayedDelivery(long ledgerId, long entryId, MessageMetadata
}
}
+ protected synchronized NavigableSet<PositionImpl> getMessagesToReplayNow(int maxMessagesToRead) {
if (!redeliveryMessages.isEmpty()) {
return redeliveryMessages.getMessagesToReplayNow(maxMessagesToRead);
} else if (delayedDeliveryTracker.isPresent() && delayedDeliveryTracker.get().hasMessageAvailable()) {
|
codereview_new_java_data_7331
|
protected void internalGetMessageById(AsyncResponse asyncResponse, long ledgerId
// will redirect if the topic not owned by current broker
getPartitionedTopicMetadataAsync(topicName, authoritative, false)
.thenAccept(partitionMetadata -> {
- if (!topicName.isPartitioned() && partitionMetadata.partitions > 0) {
log.warn("[{}] Not supported getMessageById operation on partitioned-topic {}",
clientAppId(), topicName);
asyncResponse.resume(new RestException(Status.METHOD_NOT_ALLOWED,
Need to remove '!topicName.isPartitioned()'
protected void internalGetMessageById(AsyncResponse asyncResponse, long ledgerId
// will redirect if the topic not owned by current broker
getPartitionedTopicMetadataAsync(topicName, authoritative, false)
.thenAccept(partitionMetadata -> {
+ if (partitionMetadata.partitions > 0) {
log.warn("[{}] Not supported getMessageById operation on partitioned-topic {}",
clientAppId(), topicName);
asyncResponse.resume(new RestException(Status.METHOD_NOT_ALLOWED,
|
codereview_new_java_data_7332
|
public void testCreateBytesSchema() {
// forbid admin api creating BYTES schema to be consistent with client side
try {
testSchemaInfoApi(Schema.BYTES, "schematest/test/test-BYTES");
} catch (Exception e) {
assertTrue(e.getMessage().contains("Do not upload a BYTES schema"));
}
Please add `fail()` here. Otherwise, the test will get passed if the bytes schema is uploaded.
public void testCreateBytesSchema() {
// forbid admin api creating BYTES schema to be consistent with client side
try {
testSchemaInfoApi(Schema.BYTES, "schematest/test/test-BYTES");
+ fail("should fail");
} catch (Exception e) {
assertTrue(e.getMessage().contains("Do not upload a BYTES schema"));
}
|
codereview_new_java_data_7333
|
public CompletableFuture<SchemaVersion> deleteSchemaAsync(boolean authoritative,
public CompletableFuture<SchemaVersion> postSchemaAsync(PostSchemaPayload payload, boolean authoritative) {
if (SchemaType.BYTES.name().equals(payload.getType())) {
- throw new RestException(Response.Status.NOT_ACCEPTABLE,
- "Do not upload a BYTES schema, because it's the default schema type");
}
return validateOwnershipAndOperationAsync(authoritative, TopicOperation.PRODUCE)
.thenCompose(__ -> getSchemaCompatibilityStrategyAsyncWithoutAuth())
Please return failed future to avoid additional` try-catch` when invoke this method.
public CompletableFuture<SchemaVersion> deleteSchemaAsync(boolean authoritative,
public CompletableFuture<SchemaVersion> postSchemaAsync(PostSchemaPayload payload, boolean authoritative) {
if (SchemaType.BYTES.name().equals(payload.getType())) {
+ return CompletableFuture.failedFuture(new RestException(Response.Status.NOT_ACCEPTABLE,
+ "Do not upload a BYTES schema, because it's the default schema type"));
}
return validateOwnershipAndOperationAsync(authoritative, TopicOperation.PRODUCE)
.thenCompose(__ -> getSchemaCompatibilityStrategyAsyncWithoutAuth())
|
codereview_new_java_data_7334
|
public TransactionPendingAckStats getStats(boolean lowWaterMarks) {
}
public void completeHandleFuture() {
- if (!this.pendingAckHandleCompletableFuture.isDone()) {
- this.pendingAckHandleCompletableFuture.complete(PendingAckHandleImpl.this);
- }
- if (recoverTime.getRecoverStartTime() != 0L) {
recoverTime.setRecoverEndTime(System.currentTimeMillis());
}
}
public void exceptionHandleFuture(Throwable t) {
- if (!this.pendingAckHandleCompletableFuture.isDone()) {
- this.pendingAckHandleCompletableFuture.completeExceptionally(t);
recoverTime.setRecoverEndTime(System.currentTimeMillis());
}
}
do we need this check?
CompleteableFuture can be completed only once (API contract + implementation confirms this), if needed one can use return value of complete() to check if it succeeded
public TransactionPendingAckStats getStats(boolean lowWaterMarks) {
}
public void completeHandleFuture() {
+ this.pendingAckHandleCompletableFuture.complete(PendingAckHandleImpl.this);
+ if (recoverTime.getRecoverStartTime() != 0L && recoverTime.getRecoverEndTime() == 0L) {
recoverTime.setRecoverEndTime(System.currentTimeMillis());
}
}
public void exceptionHandleFuture(Throwable t) {
+ final boolean completedNow = this.pendingAckHandleCompletableFuture.completeExceptionally(t);
+ if (completedNow) {
recoverTime.setRecoverEndTime(System.currentTimeMillis());
}
}
|
codereview_new_java_data_7335
|
public void checkEncryption() {
public void publishTxnMessage(TxnID txnID, long producerId, long sequenceId, long highSequenceId,
ByteBuf headersAndPayload, long batchSize, boolean isChunked, boolean isMarker) {
- if (!checkAndStartPublish(producerId, sequenceId, headersAndPayload, batchSize, null)) {
- return;
- }
MessagePublishContext messagePublishContext =
MessagePublishContext.get(this, sequenceId, highSequenceId, msgIn,
headersAndPayload.readableBytes(), batchSize, isChunked, System.nanoTime(), isMarker, null);
Is this its own bug? It seems unrelated to encryption.
public void checkEncryption() {
public void publishTxnMessage(TxnID txnID, long producerId, long sequenceId, long highSequenceId,
ByteBuf headersAndPayload, long batchSize, boolean isChunked, boolean isMarker) {
+ checkAndStartPublish(producerId, sequenceId, headersAndPayload, batchSize, null);
MessagePublishContext messagePublishContext =
MessagePublishContext.get(this, sequenceId, highSequenceId, msgIn,
headersAndPayload.readableBytes(), batchSize, isChunked, System.nanoTime(), isMarker, null);
|
codereview_new_java_data_7336
|
protected void splitServiceUnitOnceAndRetry(NamespaceService namespaceService,
// Retry several times on BadVersion
if ((ex.getCause() instanceof MetadataStoreException.BadVersionException)
&& (counter.incrementAndGet() < NamespaceService.BUNDLE_SPLIT_RETRY_LIMIT)) {
- pulsar.getExecutor().schedule(() -> pulsar.getOrderedExecutor()
- .execute(() -> splitServiceUnitOnceAndRetry(namespaceService, bundleFactory, bundle,
- serviceUnit, data, counter, startTime, completionFuture)),
- 100, MILLISECONDS);
} else if (ex instanceof IllegalArgumentException) {
completionFuture.completeExceptionally(ex);
} else {
Why do we chain two executors call?
protected void splitServiceUnitOnceAndRetry(NamespaceService namespaceService,
// Retry several times on BadVersion
if ((ex.getCause() instanceof MetadataStoreException.BadVersionException)
&& (counter.incrementAndGet() < NamespaceService.BUNDLE_SPLIT_RETRY_LIMIT)) {
+ pulsar.getExecutor().schedule(() -> splitServiceUnitOnceAndRetry(namespaceService, bundleFactory,
+ bundle, serviceUnit, data, counter, startTime, completionFuture), 100, MILLISECONDS);
} else if (ex instanceof IllegalArgumentException) {
completionFuture.completeExceptionally(ex);
} else {
|
codereview_new_java_data_7337
|
public int hashCode() {
return messageIdHashCode(ledgerId, entryId, partitionIndex, batchIndex);
}
- @Override
- public boolean equals(Object o) {
- if (o instanceof MessageId) {
- return compareTo((MessageId) o) == 0;
- }
- return false;
- }
-
@Override
public String toString() {
return ledgerId + ":" + entryId + ":" + partitionIndex + ":" + batchIndex;
This will throw an exception if `o` is an instance of `MessageId` but not `MessageIdImpl` nor `TopicMessageIdImpl`, like `MultiMessageIdImpl`.
Before this PR, it just return false.
public int hashCode() {
return messageIdHashCode(ledgerId, entryId, partitionIndex, batchIndex);
}
@Override
public String toString() {
return ledgerId + ":" + entryId + ":" + partitionIndex + ":" + batchIndex;
|
codereview_new_java_data_7338
|
protected void cleanup() throws Exception {
}
@DataProvider(name = "testTopic")
- public Object[][] testTopic() {
- return new Object[][] {
- {RECOVER_ABORT},
- {RECOVER_COMMIT}
};
}
@DataProvider(name = "enableSnapshotSegment")
- public Object[][] testSnapshot() {
- return new Object[][] {
- {true},
- {false}
};
}
Please avoid unnecessary changes.
protected void cleanup() throws Exception {
}
@DataProvider(name = "testTopic")
+ public Object[] testTopic() {
+ return new Object[] {
+ RECOVER_ABORT,
+ RECOVER_COMMIT
};
}
@DataProvider(name = "enableSnapshotSegment")
+ public Object[] testSnapshot() {
+ return new Boolean[] {
+ true,
+ false
};
}
|
codereview_new_java_data_7339
|
public abstract class CmdBase {
@Parameter(names = { "--help", "-h" }, help = true, hidden = true)
private boolean help = false;
public CmdBase(String cmdName, Supplier<PulsarAdmin> adminSupplier) {
this.adminSupplier = adminSupplier;
jcommander = new JCommander(this);
Is this variable/parameter useless and can it be deleted? Because `-h` is used to print the usage of `./bin/pulsar-admin`, but it has been defined in PulsarAdminTool.RootParams
https://github.com/apache/pulsar/blob/0d707aec92595868192380758a46f8f02886128f/pulsar-client-tools/src/main/java/org/apache/pulsar/admin/cli/PulsarAdminTool.java#L98-L99
public abstract class CmdBase {
@Parameter(names = { "--help", "-h" }, help = true, hidden = true)
private boolean help = false;
+ public boolean isHelp() {
+ return help;
+ }
+
public CmdBase(String cmdName, Supplier<PulsarAdmin> adminSupplier) {
this.adminSupplier = adminSupplier;
jcommander = new JCommander(this);
|
codereview_new_java_data_7340
|
public void calculateBrokerHostUsage() {
}
@VisibleForTesting
- public double getTotalNicLimitWithConfiguration(List<String> nics) {
// Use the override value as configured. Return the total max speed across all available NICs, converted
// from Gbps into Kbps
return overrideBrokerNicSpeedGbps.map(BitRateUnit.Gigabit::toKilobit)
it's a public method, so can delete `@VisibleForTesting`
public void calculateBrokerHostUsage() {
}
@VisibleForTesting
+ double getTotalNicLimitWithConfiguration(List<String> nics) {
// Use the override value as configured. Return the total max speed across all available NICs, converted
// from Gbps into Kbps
return overrideBrokerNicSpeedGbps.map(BitRateUnit.Gigabit::toKilobit)
|
codereview_new_java_data_7341
|
public void testGetChildren(String provider, Supplier<String> urlSupplier) throw
@Test(dataProvider = "impl", enabled = false)
public void testPut(String provider, Supplier<String> urlSupplier) throws Exception {
@Cleanup
- MetadataStore store = MetadataStoreFactory.create(urlSupplier.get(),
- MetadataStoreConfig.builder().fsyncEnable(false).build());
final int N_KEYS = 10_000;
final int N_PUTS = 100_000;
NIT: This benchmark is not even a unit test. It's more like a perf tool.
public void testGetChildren(String provider, Supplier<String> urlSupplier) throw
@Test(dataProvider = "impl", enabled = false)
public void testPut(String provider, Supplier<String> urlSupplier) throws Exception {
@Cleanup
+ MetadataStore store = MetadataStoreFactory.create(urlSupplier.get(), MetadataStoreConfig.builder().build());
final int N_KEYS = 10_000;
final int N_PUTS = 100_000;
|
codereview_new_java_data_7342
|
protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis
}
for (Map.Entry<Consumer, List<Entry>> current : groupedEntries.entrySet()) {
Consumer consumer = current.getKey();
List<Entry> entriesWithSameKey = current.getValue();
int entriesWithSameKeyCount = entriesWithSameKey.size();
int availablePermits = Math.max(consumer.getAvailablePermits(), 0);
```suggestion
Consumer consumer = current.getKey();
assert consumer != null; // checked when added to groupedEntries
List<Entry> entriesWithSameKey = current.getValue();
```
... for better IDE lint.
protected synchronized boolean trySendMessagesToConsumers(ReadType readType, Lis
}
for (Map.Entry<Consumer, List<Entry>> current : groupedEntries.entrySet()) {
Consumer consumer = current.getKey();
+ assert consumer != null; // checked when added to groupedEntries
List<Entry> entriesWithSameKey = current.getValue();
int entriesWithSameKeyCount = entriesWithSameKey.size();
int availablePermits = Math.max(consumer.getAvailablePermits(), 0);
|
codereview_new_java_data_7343
|
public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i
MessageMetadata msgMetadata;
if (metadataArray != null) {
msgMetadata = metadataArray[metadataIndex];
- } else if (entry instanceof EntryAndMetadata entryAndMetadata) {
- msgMetadata = entryAndMetadata.getMetadata();
} else {
msgMetadata = Commands.peekAndCopyMessageMetadata(metadataAndPayload, subscription.toString(), -1);
}
Don't use the JDK 14 feature in the existing code. cos it will introduce a compatible risk of cherry-picking.
I would like to give this point a `request change` until someone has different options.
/cc @codelipenghui
public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i
MessageMetadata msgMetadata;
if (metadataArray != null) {
msgMetadata = metadataArray[metadataIndex];
+ } else if (entry instanceof EntryAndMetadata) {
+ msgMetadata = ((EntryAndMetadata) entry).getMetadata();
} else {
msgMetadata = Commands.peekAndCopyMessageMetadata(metadataAndPayload, subscription.toString(), -1);
}
|
codereview_new_java_data_7344
|
public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i
MessageMetadata msgMetadata;
if (metadataArray != null) {
msgMetadata = metadataArray[metadataIndex];
- } else if (entry instanceof EntryAndMetadata entryAndMetadata) {
- msgMetadata = entryAndMetadata.getMetadata();
} else {
msgMetadata = Commands.peekAndCopyMessageMetadata(metadataAndPayload, subscription.toString(), -1);
}
```suggestion
} else if (entry instanceof EntryAndMetadata) {
msgMetadata = ((EntryAndMetadata) entry).getMetadata();
```
Use JDK 8 compatible way so that we can cherry-pick it into older branches.
public int filterEntriesForConsumer(@Nullable MessageMetadata[] metadataArray, i
MessageMetadata msgMetadata;
if (metadataArray != null) {
msgMetadata = metadataArray[metadataIndex];
+ } else if (entry instanceof EntryAndMetadata) {
+ msgMetadata = ((EntryAndMetadata) entry).getMetadata();
} else {
msgMetadata = Commands.peekAndCopyMessageMetadata(metadataAndPayload, subscription.toString(), -1);
}
|
codereview_new_java_data_7345
|
public class DnsResolverUtil {
log.warn("Cannot get DNS TTL settings from sun.net.InetAddressCachePolicy class", e);
}
TTL = ttl <= 0 ? DEFAULT_TTL : ttl;
- NEGATIVE_TTL = negativeTtl < 0 ? DEFAULT_TTL : negativeTtl;
}
private DnsResolverUtil() {
```suggestion
NEGATIVE_TTL = negativeTtl < 0 ? DEFAULT_NEGATIVE_TTL : negativeTtl;
```
public class DnsResolverUtil {
log.warn("Cannot get DNS TTL settings from sun.net.InetAddressCachePolicy class", e);
}
TTL = ttl <= 0 ? DEFAULT_TTL : ttl;
+ NEGATIVE_TTL = negativeTtl < 0 ? DEFAULT_NEGATIVE_TTL : negativeTtl;
}
private DnsResolverUtil() {
|
codereview_new_java_data_7346
|
public void run() {
Assert.assertEquals(0, datas.size());
}
- /**
- * see https://github.com/apache/pulsar/pull/18491
- */
@Test
public void testMultiTopicConsumerConcurrentRedeliverAndReceive() throws Exception {
final String topic = BrokerTestUtil.newUniqueName("my-topic");
```suggestion
```
Remove the comments
public void run() {
Assert.assertEquals(0, datas.size());
}
@Test
public void testMultiTopicConsumerConcurrentRedeliverAndReceive() throws Exception {
final String topic = BrokerTestUtil.newUniqueName("my-topic");
|
codereview_new_java_data_7914
|
-/* (c) 2017 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
```suggestion
/* (c) 2023 Open Source Geospatial Foundation - all rights reserved
```
+/* (c) 2023 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
|
codereview_new_java_data_7915
|
-/* (c) 2017 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
```suggestion
/* (c) 2023 Open Source Geospatial Foundation - all rights reserved
```
+/* (c) 2023 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
|
codereview_new_java_data_7916
|
private static void callLifecycleHandlers(
try {
callback.accept(handler);
} catch (Throwable t) {
- LOGGER.logp(
Level.SEVERE,
- handler.getClass().getName(),
- name,
- "A GeoServer lifecycle handler threw an exception",
t);
}
}
So you are using logp to report the original class of the handler, while the method name is provided as a string... not refactoring friendly but breaking an interface is indeed an unlikely occurrence.
May confuse people reading the logs, as the exception reports one thing but the stack trace reports a slightly different location (it's going to be off by one row).
Anyways, -0, I'm not fond of this approach but not the end of the world.
private static void callLifecycleHandlers(
try {
callback.accept(handler);
} catch (Throwable t) {
+ LOGGER.log(
Level.SEVERE,
+ "A GeoServer lifecycle handler threw an exception during " + name,
t);
}
}
|
codereview_new_java_data_7917
|
protected MockHttpServletResponse dispatch(HttpServletRequest request, String ch
}
/**
- * Remove parmeters from mime type.
*
* @param mimeType the mime type
* @return with mime type without parameters
```suggestion
* Remove parameters from mime type.
```
protected MockHttpServletResponse dispatch(HttpServletRequest request, String ch
}
/**
+ * Remove parameters from mime type.
*
* @param mimeType the mime type
* @return with mime type without parameters
|
codereview_new_java_data_7918
|
protected void registerHandlerMethod(
if (patternsRequestCondition != null && patternsRequestCondition.getPatterns() != null) {
for (String pattern : patternsRequestCondition.getPatterns()) {
if (pattern.contains(GWC_URL_PATTERN)) {
- // this is an handler for GWC WMTS REST API
super.registerHandlerMethod(handler, method, mapping);
break;
}
```suggestion
// this is a handler for GWC WMTS REST API
```
protected void registerHandlerMethod(
if (patternsRequestCondition != null && patternsRequestCondition.getPatterns() != null) {
for (String pattern : patternsRequestCondition.getPatterns()) {
if (pattern.contains(GWC_URL_PATTERN)) {
+ // this is a handler for GWC WMTS REST API
super.registerHandlerMethod(handler, method, mapping);
break;
}
|
codereview_new_java_data_7919
|
-/* (c) 2026 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
```suggestion
/* (c) 2018 Open Source Geospatial Foundation - all rights reserved
```
+/* (c) 2018 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
|
codereview_new_java_data_7920
|
-/* (c) 20189 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
```suggestion
/* (c) 2018 Open Source Geospatial Foundation - all rights reserved
```
+/* (c) 2018 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*/
|
codereview_new_java_data_7921
|
import org.opengis.filter.sort.SortBy;
import org.springframework.beans.FatalBeanException;
public abstract class QueryableMappingRecordDescriptor extends AbstractRecordDescriptor {
private static final Logger LOGGER = Logging.getLogger(QueryableMappingRecordDescriptor.class);
Javadoc for public class? In particular, trying to understand the reason for this abstract class... is it to allow potentially other complex record types, other than the ISO ones?
import org.opengis.filter.sort.SortBy;
import org.springframework.beans.FatalBeanException;
+/**
+ * Abstract class for Record Descriptor that supports configurable Queryables. The queryables
+ * mapping is stored in the ${recordtype}.queryables.properties file which is automatically copied
+ * to the csw folder in the geoserver data directory.
+ */
public abstract class QueryableMappingRecordDescriptor extends AbstractRecordDescriptor {
private static final Logger LOGGER = Logging.getLogger(QueryableMappingRecordDescriptor.class);
|
codereview_new_java_data_7922
|
public void testValidatesAgainstDTD() throws Exception {
// get the wms 1.1.1 DTD
URL dtdURL =
GetCapabilitiesTransformer.class.getResource(
- "/schemas/wms/1.1.1/wms_ms_capabilities.dtd");
String dtd = Resources.toString(dtdURL, StandardCharsets.UTF_8);
try (InputStream dtdInputStream = new ByteArrayInputStream(dtd.getBytes())) {
I see this error in the logs:
2022-11-25T01:25:32.0940789Z 01:25:32,093 [ERROR] testValidatesAgainstDTD(org.geoserver.wms.capabilities.GetCapabilitiesTransformerTest) Time elapsed: 0.836 s <<< ERROR!
2022-11-25T01:25:32.0941352Z java.lang.NullPointerException
2022-11-25T01:25:32.0942044Z at org.geoserver.wms.capabilities.GetCapabilitiesTransformerTest.testValidatesAgainstDTD(GetCapabilitiesTransformerTest.java:470)
It works on Mac and Windows, but not on linux.
public void testValidatesAgainstDTD() throws Exception {
// get the wms 1.1.1 DTD
URL dtdURL =
GetCapabilitiesTransformer.class.getResource(
+ "/schemas/wms/1.1.1/WMS_MS_Capabilities.dtd");
String dtd = Resources.toString(dtdURL, StandardCharsets.UTF_8);
try (InputStream dtdInputStream = new ByteArrayInputStream(dtd.getBytes())) {
|
codereview_new_java_data_7923
|
public void testValidatesAgainstDTD() throws Exception {
// get the wms 1.1.1 DTD
URL dtdURL =
GetCapabilitiesTransformer.class.getResource(
- "/schemas/wms/1.1.1/wms_ms_capabilities.dtd");
String dtd = Resources.toString(dtdURL, StandardCharsets.UTF_8);
try (InputStream dtdInputStream = new ByteArrayInputStream(dtd.getBytes())) {
```suggestion
"/schemas/wms/1.1.1/WMS_MS_Capabilities.dtd");
```
Linux is case sensitive, mACos and wInDoWS are not.
public void testValidatesAgainstDTD() throws Exception {
// get the wms 1.1.1 DTD
URL dtdURL =
GetCapabilitiesTransformer.class.getResource(
+ "/schemas/wms/1.1.1/WMS_MS_Capabilities.dtd");
String dtd = Resources.toString(dtdURL, StandardCharsets.UTF_8);
try (InputStream dtdInputStream = new ByteArrayInputStream(dtd.getBytes())) {
|
codereview_new_java_data_7924
|
*/
public class SpatialJSONGetFeatureResponse extends GeoJSONGetFeatureResponse {
- // currently no logger required
- // private final Logger LOGGER = org.geotools.util.logging.Logging.getLogger(this.getClass());
private static String parseMimeType(String format) {
int pos = format.indexOf(';');
this can be deleted
```suggestion
```
*/
public class SpatialJSONGetFeatureResponse extends GeoJSONGetFeatureResponse {
private static String parseMimeType(String format) {
int pos = format.indexOf(';');
|
codereview_new_java_data_7925
|
protected ServiceDescription description(
}
/**
- * gets the name of the "version" parameter for the service. This will usually be "version", but
- * some (i.e. WCS 2+) it will be "acceptversions". To overrided by subclasses.
*
* @param service
- * @return
*/
protected String getVersionParameterName(Service service) {
return "version";
```suggestion
* Gets the name of the {@code version} parameter for the service. This will usually be {@code version}, but
* some (i.e. WCS 2+) it will be {@code acceptversions}. To overrided by subclasses.
*
* @param service
* @return version parameter of service, example {@code version} or {@code acceptversions}
```
protected ServiceDescription description(
}
/**
+ * Gets the name of the {@code version} parameter for the service. This will usually be {@code version}, but
+ * some (i.e. WCS 2+) it will be {@code acceptversions}. To overrided by subclasses.
*
* @param service
+ * @return version parameter of service, example {@code version} or {@code acceptversions}
*/
protected String getVersionParameterName(Service service) {
return "version";
|
codereview_new_java_data_7926
|
public void testLogLocationFromEmptyContext() throws Exception {
context.setInitParameter(
"GEOSERVER_LOG_LOCATION", new File(tmp, "foo.log").getAbsolutePath());
- ServletContextListener listener = new LoggingStartupContextListener();
- listener.contextInitialized(new ServletContextEvent(context));
- listener.contextDestroyed(new ServletContextEvent(context));
}
@Test
The test case does not seem to listen to the logs to see if an logging event is generated. I made an test appender for writing these kind of test cases; but this is a bit of a special case since there is not yet an existing logging setup to add a test appender to!
So this test case is good and we should keep it; but it does not check that logging messages for the log file being missing are indeed suppressed.
public void testLogLocationFromEmptyContext() throws Exception {
context.setInitParameter(
"GEOSERVER_LOG_LOCATION", new File(tmp, "foo.log").getAbsolutePath());
+ try (TestAppender appender = TestAppender.createAppender("quite", null)) {
+ appender.startRecording("org.geoserver.logging");
+ appender.trigger("Could not reconfigure LOG4J loggers");
+
+ ServletContextListener listener = new LoggingStartupContextListener();
+ listener.contextInitialized(new ServletContextEvent(context));
+ listener.contextDestroyed(new ServletContextEvent(context));
+
+ appender.stopRecording("org.geoserver.logging");
+ }
}
@Test
|
codereview_new_java_data_7927
|
package org.geoserver.wfs.v2_0;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertEquals;
import java.net.URL;
import org.custommonkey.xmlunit.XMLUnit;
import org.custommonkey.xmlunit.XpathEngine;
import org.geoserver.catalog.Catalog;
Copyright header missing ;-)
+/* (c) 2022 Open Source Geospatial Foundation - all rights reserved
+ * This code is licensed under the GPL 2.0 license, available at the root
+ * application directory.
+ */
package org.geoserver.wfs.v2_0;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.notNullValue;
import static org.junit.Assert.assertEquals;
import java.net.URL;
+
import org.custommonkey.xmlunit.XMLUnit;
import org.custommonkey.xmlunit.XpathEngine;
import org.geoserver.catalog.Catalog;
|
codereview_new_java_data_7928
|
protected void onSetUp(SystemTestData testData) throws Exception {
// alter the native BBOX of one raster for at least a pixel to make sure the declared
// bounds are used, but also fitted to the grid to avoid resampling.
- // Compared to the native bbox we are expanding by almost one pixel in all directions:
// 1 pixel
// ↓
// 1 pixel ←- -→ 1 pixel
```suggestion
// ↑
```
protected void onSetUp(SystemTestData testData) throws Exception {
// alter the native BBOX of one raster for at least a pixel to make sure the declared
// bounds are used, but also fitted to the grid to avoid resampling.
+ // Compared to the native bbox we are expanding by almost one pixel on both the west and
+ // east sides, and shrinking by one pixel on both the north and south ones:
// 1 pixel
// ↓
// 1 pixel ←- -→ 1 pixel
|
codereview_new_java_data_7929
|
protected void onSetUp(SystemTestData testData) throws Exception {
// alter the native BBOX of one raster for at least a pixel to make sure the declared
// bounds are used, but also fitted to the grid to avoid resampling.
- // Compared to the native bbox we are expanding by almost one pixel in all directions:
// 1 pixel
// ↓
// 1 pixel ←- -→ 1 pixel
```suggestion
// ↓
```
protected void onSetUp(SystemTestData testData) throws Exception {
// alter the native BBOX of one raster for at least a pixel to make sure the declared
// bounds are used, but also fitted to the grid to avoid resampling.
+ // Compared to the native bbox we are expanding by almost one pixel on both the west and
+ // east sides, and shrinking by one pixel on both the north and south ones:
// 1 pixel
// ↓
// 1 pixel ←- -→ 1 pixel
|
codereview_new_java_data_7930
|
public void cleanupLimitedSRS() {
@Test
public void testBasicKVP() throws Exception {
Document dom = getAsDOM("wcs?request=GetCapabilities&service=WCS");
- print(dom);
checkFullCapabilitiesDocument(dom);
}
Should that be commented again after development?
public void cleanupLimitedSRS() {
@Test
public void testBasicKVP() throws Exception {
Document dom = getAsDOM("wcs?request=GetCapabilities&service=WCS");
+ // print(dom);
checkFullCapabilitiesDocument(dom);
}
|
codereview_new_java_data_7931
|
-/* (c) 2022 Open Source Geospatial Foundation - all rights reserved
- * This code is licensed under the GPL 2.0 license, available at the root
- * application directory.
- */
-package org.geoserver.gwc.controller;
-
-import org.geoserver.catalog.Catalog;
-
-/**
- * Handler for mapping workspace-based web (i.e. web/openlayers/ol.js) requests to
- * non-workspace-based requests.
- */
-public class GwcRestWebUrlHandlerMapping extends GwcWmtsRestUrlHandlerMapping {
-
- public GwcRestWebUrlHandlerMapping(Catalog catalog) {
- super(catalog);
- GWC_URL_PATTERN = "/gwc/rest/web";
- }
-}
Unsure about this one... is this controller doing more or less the same thing as the "www" folder in GeoServer? If so maybe it should stay in gwc-core.
I don't see documentation for this endpoint:
* https://docs.geoserver.org/stable/en/user/geowebcache/rest/index.html
Nor I see an evident candidate for it here:
https://www.geowebcache.org/docs/current/rest/index.html
|
codereview_new_java_data_7932
|
/*
- * (c) 2018 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*
*/
-/* (c) 2016 Open Source Geospatial Foundation - all rights reserved
- * This code is licensed under the GPL 2.0 license, available at the root
- * application directory.
- */
package org.geoserver.security.oauth2;
import java.io.Serializable;
Double header here, not your fault but we could fix it.
/*
+ * (c) 2016-2022 Open Source Geospatial Foundation - all rights reserved
* This code is licensed under the GPL 2.0 license, available at the root
* application directory.
*
*/
package org.geoserver.security.oauth2;
import java.io.Serializable;
|
codereview_new_java_data_7933
|
import net.sf.json.JSONObject;
/**
- * make sure your Azure AD application has "GroupMember.Read.All" permission: a) go to your
* application in Azure AD (in the portal) b) On the left, go to "API permissions" c) click "Add a
* permission" d) press "Microsoft Graph" e) press "Delegated permission" f) Scroll down to
* "GroupMember" g) Choose "GroupMemeber.Read.All" h) press "Add permission" i) on the API
```suggestion
* Verify role using Azure graph.
*
* Make sure your Azure AD application has "GroupMember.Read.All" permission: a) go to your
```
import net.sf.json.JSONObject;
/**
+ * Verify role using Azure graph.
+ *
+ * Make sure your Azure AD application has "GroupMember.Read.All" permission: a) go to your
* application in Azure AD (in the portal) b) On the left, go to "API permissions" c) click "Add a
* permission" d) press "Microsoft Graph" e) press "Delegated permission" f) Scroll down to
* "GroupMember" g) Choose "GroupMemeber.Read.All" h) press "Add permission" i) on the API
|
codereview_new_java_data_7934
|
import org.springframework.security.oauth2.provider.token.RemoteTokenServices;
/** OpenID Connect authentication filter. */
public class OpenIdConnectAuthenticationFilter extends GeoServerOAuthAuthenticationFilter {
static final String ID_TOKEN_VALUE = "OpenIdConnect-IdTokenValue";
```suggestion
/**
* Authenticate using OpenID Connect.
*/
public class OpenIdConnectAuthenticationFilter extends GeoServerOAuthAuthenticationFilter {
```
import org.springframework.security.oauth2.provider.token.RemoteTokenServices;
/** OpenID Connect authentication filter. */
+/**
+ * Authenticate using OpenID Connect.
+ */
public class OpenIdConnectAuthenticationFilter extends GeoServerOAuthAuthenticationFilter {
static final String ID_TOKEN_VALUE = "OpenIdConnect-IdTokenValue";
|
codereview_new_java_data_7935
|
import org.geoserver.security.oauth2.OpenIdConnectFilterConfig;
/**
- * This is a simple token validator that runs a list of TokenValidators. This doesn't do any
- * validation on its own...
*/
public class MultiTokenValidator implements TokenValidator {
```suggestion
* This is a token validator that runs a list of TokenValidators. This doesn't do any
```
import org.geoserver.security.oauth2.OpenIdConnectFilterConfig;
/**
+ * This is a token validator that runs a list of TokenValidators. This doesn't do any validation on
+ * its own...
*/
public class MultiTokenValidator implements TokenValidator {
|
codereview_new_java_data_7936
|
* Dispatcher callback that sets and clears the {@link LocalWorkspace} and {@link LocalPublished}
* thread locals.
*
* @author Justin Deoliveira, OpenGeo
*/
public class LocalWorkspaceCallback implements DispatcherCallback, ExtensionPriority {
I don't get the intent of this change.
* Dispatcher callback that sets and clears the {@link LocalWorkspace} and {@link LocalPublished}
* thread locals.
*
+ * <p>This class is responsible for parsing request context information ({@code workspace/ows?},
+ * {@code layergroup/wms?}, {@code workspace/layer}) and resolving the intended local workspace,
+ * layer group, and/or layer information which are stored in a thread locale for reference.
+ *
+ * <p>The logic used here is duplicated by GeoServerHomePage which provides a user interface
+ * reflecting this context lookup process.
+ *
* @author Justin Deoliveira, OpenGeo
*/
public class LocalWorkspaceCallback implements DispatcherCallback, ExtensionPriority {
|
codereview_new_java_data_7937
|
public interface StoreInfo extends CatalogInfo {
*/
// <T extends Resource> Iterator<T> getResources(ProgressListener monitor) throws IOException;
}
```suggestion
* When true, the {@link ResourcePool} will automatically disable the store on connection failure. Defaults to false.
```
(I should have linked to the Resource
public interface StoreInfo extends CatalogInfo {
*/
// <T extends Resource> Iterator<T> getResources(ProgressListener monitor) throws IOException;
+ /**
+ * Get the auto disable flag on connection failure.
+ *
+ * @return true if the store should be disabled when a connection error happens. False
+ * otherwise.
+ */
+ boolean isDisableOnConnFailure();
+
+ /**
+ * Set the auto disable flag on connection failure
+ *
+ * @param disableOnConnFailure the auto disable flag on connection failure.
+ */
+ void setDisableOnConnFailure(boolean disableOnConnFailure);
}
|
codereview_new_java_data_7938
|
import org.geoserver.config.util.XStreamPersister;
import org.geoserver.config.util.XStreamPersisterInitializer;
-/** Extension point to enable emsa package name in the SecureXStream. */
public class JMSXStreamPersisterInitializer implements XStreamPersisterInitializer {
@Override
```suggestion
/** Extension point to enable JSM packages name in the SecureXStream. */
```
import org.geoserver.config.util.XStreamPersister;
import org.geoserver.config.util.XStreamPersisterInitializer;
+/** Extension point to enable JSM packages name in the SecureXStream. */
public class JMSXStreamPersisterInitializer implements XStreamPersisterInitializer {
@Override
|
codereview_new_java_data_7939
|
public void mangleURL(
// (for two reasons: a) speed; b) to make the admin aware of
// possible security liabilities)
- boolean workspaceEnabled = this.geoServer.getSettings().getWorkspace() != null;
- boolean doMangleHeaders =
- workspaceEnabled
- ? this.geoServer.getSettings().isUseHeadersProxyURL()
- : this.geoServer.getGlobal().isUseHeadersProxyURL();
if (proxyBase != null && doMangleHeaders) {
this.mangleURLHeaders(baseURL, proxyBase);
Please add some FINE level logging to know whats happening and the result of this logic conditions execution.
public void mangleURL(
// (for two reasons: a) speed; b) to make the admin aware of
// possible security liabilities)
+ boolean doMangleHeaders = geoServer.getSettings().isUseHeadersProxyURL();
if (proxyBase != null && doMangleHeaders) {
this.mangleURLHeaders(baseURL, proxyBase);
|
codereview_new_java_data_8122
|
/**
* Default {@link HttpRequest} implementation.
- *
- * @deprecated Use {@link HttpRequest#streaming(RequestHeaders)} instead.
*/
-@Deprecated
public class DefaultHttpRequest extends DefaultStreamMessage<HttpObject> implements HttpRequestWriter {
private final RequestHeaders headers;
/**
* Creates a new instance with the specified headers.
- *
- * @deprecated Use {@link HttpRequest#streaming(RequestHeaders)} instead.
*/
- @Deprecated
public DefaultHttpRequest(RequestHeaders headers) {
this.headers = requireNonNull(headers, "headers");
}
It is in `internal`. Do we need `@Deprecated`?
/**
* Default {@link HttpRequest} implementation.
*/
+@SuppressWarnings("deprecation")
public class DefaultHttpRequest extends DefaultStreamMessage<HttpObject> implements HttpRequestWriter {
private final RequestHeaders headers;
/**
* Creates a new instance with the specified headers.
*/
public DefaultHttpRequest(RequestHeaders headers) {
this.headers = requireNonNull(headers, "headers");
}
|
codereview_new_java_data_8123
|
public DnsAddressEndpointGroupBuilder selectionTimeoutMillis(long selectionTimeo
@Override
public DnsAddressEndpointGroupBuilder addDnsQuestionListeners(
- Iterable<DnsQuestionListener> dnsQuestionListeners) {
return (DnsAddressEndpointGroupBuilder) super.addDnsQuestionListeners(dnsQuestionListeners);
}
```suggestion
Iterable<? extends DnsQuestionListener> dnsQuestionListeners) {
```
public DnsAddressEndpointGroupBuilder selectionTimeoutMillis(long selectionTimeo
@Override
public DnsAddressEndpointGroupBuilder addDnsQuestionListeners(
+ Iterable<? extends DnsQuestionListener> dnsQuestionListeners) {
return (DnsAddressEndpointGroupBuilder) super.addDnsQuestionListeners(dnsQuestionListeners);
}
|
codereview_new_java_data_8124
|
*/
enum DefaultDnsQueryListener implements DnsQueryListener {
- DEFAULT_INSTANCE;
private final Logger logger = LoggerFactory.getLogger(getClass());
nit: just `INSTANCE`?
*/
enum DefaultDnsQueryListener implements DnsQueryListener {
+ INSTANCE;
private final Logger logger = LoggerFactory.getLogger(getClass());
|
codereview_new_java_data_8125
|
enum DefaultDnsQueryListener implements DnsQueryListener {
INSTANCE;
- private final Logger logger = LoggerFactory.getLogger(getClass());
@Override
public void onSuccess(List<DnsRecord> oldRecords, List<DnsRecord> newRecords, String logPrefix) {}
nit:
```suggestion
private static final Logger logger = LoggerFactory.getLogger(DefaultDnsQueryListener.class);
```
enum DefaultDnsQueryListener implements DnsQueryListener {
INSTANCE;
+ private static final Logger logger = LoggerFactory.getLogger(DefaultDnsQueryListener.class);
@Override
public void onSuccess(List<DnsRecord> oldRecords, List<DnsRecord> newRecords, String logPrefix) {}
|
codereview_new_java_data_8126
|
public interface RoutingContext {
/**
* Returns a wrapped {@link RoutingContext} which holds the specified {@link HttpMethod}.
*/
- default RoutingContext overrideMethod(HttpMethod method) {
requireNonNull(method, "method");
return new RoutingContextWrapper(this) {
@Override
`withMethod`? `with*` seems like a popular choice when creating a new instance.
public interface RoutingContext {
/**
* Returns a wrapped {@link RoutingContext} which holds the specified {@link HttpMethod}.
*/
+ default RoutingContext withMethod(HttpMethod method) {
requireNonNull(method, "method");
return new RoutingContextWrapper(this) {
@Override
|
codereview_new_java_data_8127
|
public String path() {
* Returns a wrapped {@link RoutingContext} which holds the specified {@code path}.
* It is usually used to find an {@link HttpService} with a prefix-stripped path.
*
- * @deprecated Use {@link #withPath} instead.
*/
@Deprecated
default RoutingContext overridePath(String path) {
- requireNonNull(path, "path");
- return new RoutingContextWrapper(this) {
- @Override
- public String path() {
- return path;
- }
- };
}
/**
```suggestion
* @deprecated Use {@link #withPath}.
```
public String path() {
* Returns a wrapped {@link RoutingContext} which holds the specified {@code path}.
* It is usually used to find an {@link HttpService} with a prefix-stripped path.
*
+ * @deprecated Use {@link #withPath}.
*/
@Deprecated
default RoutingContext overridePath(String path) {
+ return withPath(path);
}
/**
|
codereview_new_java_data_8128
|
void hashcodeRecalculateWhenMethodChange() {
MediaType.XML_UTF_8 + "; q=0.8"),
"/hello", null, null, RoutingStatus.OK);
final RoutingContext ctx3 = ctx1.withMethod(HttpMethod.POST);
- assertThat(ctx1).isNotEqualTo(ctx3);
- assertThat(ctx2).isEqualTo(ctx3);
}
static RoutingContext create(String path) {
Thanks for adding a test case :+1:
void hashcodeRecalculateWhenMethodChange() {
MediaType.XML_UTF_8 + "; q=0.8"),
"/hello", null, null, RoutingStatus.OK);
final RoutingContext ctx3 = ctx1.withMethod(HttpMethod.POST);
+ assertThat(ctx1.hashCode()).isNotEqualTo(ctx3.hashCode());
+ assertThat(ctx2.hashCode()).isEqualTo(ctx3.hashCode());
}
static RoutingContext create(String path) {
|
codereview_new_java_data_8129
|
public Routed<ServiceConfig> findServiceConfig(RoutingContext routingCtx, boolea
return routed;
case NOT_MATCHED:
if (routingCtx.method() == HttpMethod.HEAD) {
- return findServiceConfig(routingCtx.withMethod(HttpMethod.GET));
}
if (!useFallbackService) {
`useFallbackService` should be propagated.
```suggestion
return findServiceConfig(routingCtx.withMethod(HttpMethod.GET), useFallbackService);
```
public Routed<ServiceConfig> findServiceConfig(RoutingContext routingCtx, boolea
return routed;
case NOT_MATCHED:
if (routingCtx.method() == HttpMethod.HEAD) {
+ return findServiceConfig(routingCtx.withMethod(HttpMethod.GET), useFallbackService);
}
if (!useFallbackService) {
|
codereview_new_java_data_8130
|
import com.linecorp.armeria.server.annotation.Get;
import com.linecorp.armeria.testing.junit5.server.ServerExtension;
-public class AnnotatedServiceImplicitHeadTest {
@RegisterExtension
static final ServerExtension server = new ServerExtension() {
junit5 classes don't have to be public
```suggestion
class AnnotatedServiceImplicitHeadTest {
```
import com.linecorp.armeria.server.annotation.Get;
import com.linecorp.armeria.testing.junit5.server.ServerExtension;
+class AnnotatedServiceImplicitHeadTest {
@RegisterExtension
static final ServerExtension server = new ServerExtension() {
|
codereview_new_java_data_8131
|
void hashcodeRecalculateWhenMethodChange() {
MediaType.XML_UTF_8 + "; q=0.8"),
"/hello", null, null, RoutingStatus.OK);
final RoutingContext ctx3 = ctx1.withMethod(HttpMethod.POST);
- assertThat(ctx1).isNotEqualTo(ctx3);
- assertThat(ctx2).isEqualTo(ctx3);
}
static RoutingContext create(String path) {
If this is for checking hashcode, shouldn't we add hashcode?
`assertThat(ctx2.hashCode()).isEqualTo(ctx3.hashCode());`
void hashcodeRecalculateWhenMethodChange() {
MediaType.XML_UTF_8 + "; q=0.8"),
"/hello", null, null, RoutingStatus.OK);
final RoutingContext ctx3 = ctx1.withMethod(HttpMethod.POST);
+ assertThat(ctx1.hashCode()).isNotEqualTo(ctx3.hashCode());
+ assertThat(ctx2.hashCode()).isEqualTo(ctx3.hashCode());
}
static RoutingContext create(String path) {
|
codereview_new_java_data_8132
|
package com.linecorp.armeria.common.stream;
/**
- * A type which is both a {@link StreamMessage} and a {@link StreamWriter}. This type is mainly used by tests
- * which need to exercise both functionality.
*/
public interface StreamMessageWriter<T> extends StreamMessage<T>, StreamWriter<T> {
}
Could you also update the description?
package com.linecorp.armeria.common.stream;
/**
+ * A type which is both a {@link StreamMessage} and a {@link StreamWriter}.
+ * {@link StreamMessageWriter} publishes the objects written via {@link StreamWriter#write(Object)}.
*/
public interface StreamMessageWriter<T> extends StreamMessage<T>, StreamWriter<T> {
}
|
codereview_new_java_data_8133
|
static <T> StreamMessage<T> aborted(Throwable cause) {
* Creates a new {@link StreamMessageWriter} that publishes the objects written via
* {@link StreamWriter#write(Object)}.
*/
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
}
So you want to deprecate this class in a follow-up PR. Is that correct?
static <T> StreamMessage<T> aborted(Throwable cause) {
* Creates a new {@link StreamMessageWriter} that publishes the objects written via
* {@link StreamWriter#write(Object)}.
*/
+ @UnstableApi
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
}
|
codereview_new_java_data_8134
|
package com.linecorp.armeria.common.stream;
/**
* A type which is both a {@link StreamMessage} and a {@link StreamWriter}.
* {@link StreamMessageWriter} publishes the objects written via {@link StreamWriter#write(Object)}.
*/
public interface StreamMessageWriter<T> extends StreamMessage<T>, StreamWriter<T> {
}
```suggestion
@UnstableApi
public interface StreamMessageWriter<T> extends StreamMessage<T>, StreamWriter<T> {
```
package com.linecorp.armeria.common.stream;
+import com.linecorp.armeria.common.annotation.UnstableApi;
+
/**
* A type which is both a {@link StreamMessage} and a {@link StreamWriter}.
* {@link StreamMessageWriter} publishes the objects written via {@link StreamWriter#write(Object)}.
*/
+@UnstableApi
public interface StreamMessageWriter<T> extends StreamMessage<T>, StreamWriter<T> {
}
|
codereview_new_java_data_8135
|
static <T> StreamMessage<T> aborted(Throwable cause) {
* Creates a new {@link StreamMessageWriter} that publishes the objects written via
* {@link StreamWriter#write(Object)}.
*/
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
}
```suggestion
@UnstableApi
static <T> StreamMessageWriter<T> streaming() {
```
static <T> StreamMessage<T> aborted(Throwable cause) {
* Creates a new {@link StreamMessageWriter} that publishes the objects written via
* {@link StreamWriter#write(Object)}.
*/
+ @UnstableApi
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
}
|
codereview_new_java_data_8136
|
static void maybeCompletePreferredRecords(CompletableFuture<List<DnsRecord>> fut
results[order] = records;
}
- for (int i = 0; i < results.length; i++) {
- final Object result = results[i];
if (result == null) {
// A highly preferred question hasn't finished yet.
return;
nit:
```suggestion
for (Object result : results) {
```
static void maybeCompletePreferredRecords(CompletableFuture<List<DnsRecord>> fut
results[order] = records;
}
+ for (Object result : results) {
if (result == null) {
// A highly preferred question hasn't finished yet.
return;
|
codereview_new_java_data_8137
|
default boolean shutdownBlockingTaskExecutorOnStop() {
boolean isServerHeaderEnabled();
/**
- * Returns the {@link Supplier} that generates a {@link RequestId} for each {@link Request}.
*/
Function<RoutingContext, RequestId> requestIdGenerator();
I think we can also update the Javadoc for this method.
default boolean shutdownBlockingTaskExecutorOnStop() {
boolean isServerHeaderEnabled();
/**
+ * Returns the {@link Function} that generates a {@link RequestId} for each {@link Request}.
*/
Function<RoutingContext, RequestId> requestIdGenerator();
|
codereview_new_java_data_8138
|
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
- * @see RequestContext#id()
*/
- public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdGenerator) {
- final Supplier<? extends RequestId> requestIdSupplier = requireNonNull(requestIdGenerator);
- this.requestIdGenerator = routingContext -> requestIdSupplier.get();
- return this;
}
/**
- * Generate {@link RequestId} from {@link RoutingContext}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
Should we delegate to `requestIdGenerator(Function)`?
For example:
```java
requireNonNull(requestIdGenerator, "requestIdGenerator");
return requestIdGenerator(routingCtx -> requestIdSupplier.get());
```
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
+ * @deprecated this method is replaced by
+ * {@link #requestIdGenerator(Function<? super RoutingContext, ? extends RequestId>)}
*/
+ @Deprecated
+ public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
+ return requestIdGenerator(routingContext -> requestIdSupplier.get());
}
/**
+ * Sets the {@link Function} that generates a {@link RequestId} for each {@link Request}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
|
codereview_new_java_data_8139
|
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
- * @see RequestContext#id()
*/
- public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdGenerator) {
- final Supplier<? extends RequestId> requestIdSupplier = requireNonNull(requestIdGenerator);
- this.requestIdGenerator = routingContext -> requestIdSupplier.get();
- return this;
}
/**
- * Generate {@link RequestId} from {@link RoutingContext}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
Should we add the `@Deprecated` annotation and `@deprecated` Javadoc?
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
+ * @deprecated this method is replaced by
+ * {@link #requestIdGenerator(Function<? super RoutingContext, ? extends RequestId>)}
*/
+ @Deprecated
+ public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
+ return requestIdGenerator(routingContext -> requestIdSupplier.get());
}
/**
+ * Sets the {@link Function} that generates a {@link RequestId} for each {@link Request}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
|
codereview_new_java_data_8140
|
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
- * @see RequestContext#id()
*/
- public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdGenerator) {
- final Supplier<? extends RequestId> requestIdSupplier = requireNonNull(requestIdGenerator);
- this.requestIdGenerator = routingContext -> requestIdSupplier.get();
- return this;
}
/**
- * Generate {@link RequestId} from {@link RoutingContext}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
Should we update the Javadoc to describe the behavior of this method?
In fact, this method does not generate `RequestId` but takes a request ID generator.
public ServerBuilder setHeaders(
* Sets the {@link Supplier} which generates a {@link RequestId}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
+ * @deprecated this method is replaced by
+ * {@link #requestIdGenerator(Function<? super RoutingContext, ? extends RequestId>)}
*/
+ @Deprecated
+ public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
+ return requestIdGenerator(routingContext -> requestIdSupplier.get());
}
/**
+ * Sets the {@link Function} that generates a {@link RequestId} for each {@link Request}.
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @see RequestContext#id()
|
codereview_new_java_data_8141
|
public ServerBuilder setHeaders(
*/
@Deprecated
public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
return requestIdGenerator(routingContext -> requestIdSupplier.get());
}
nit:
```suggestion
public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
requireNonNull(requestIdSupplier, "requestIdSupplier");
return requestIdGenerator(routingContext -> requestIdSupplier.get());
```
public ServerBuilder setHeaders(
*/
@Deprecated
public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
+ requireNonNull(requestIdSupplier, "requestIdSupplier");
return requestIdGenerator(routingContext -> requestIdSupplier.get());
}
|
codereview_new_java_data_8142
|
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
- checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
```suggestion
checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with a non-Armeria gRPC server",
```
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
+ checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with a non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
|
codereview_new_java_data_8143
|
static Set<AnnotatedValueResolver> uniqueResolverSet() {
String o2Name = o2.httpElementName();
final Class<? extends Annotation> o1AnnotationType = o1.annotationType();
final Class<? extends Annotation> o2AnnotationType = o2.annotationType();
- if (o1Name.equals(o2Name) && o1AnnotationType == o2AnnotationType) {
return 0;
}
nit: let's do the reference equality check first which is cheaper:
static Set<AnnotatedValueResolver> uniqueResolverSet() {
String o2Name = o2.httpElementName();
final Class<? extends Annotation> o1AnnotationType = o1.annotationType();
final Class<? extends Annotation> o2AnnotationType = o2.annotationType();
+ if (o1AnnotationType == o2AnnotationType && o1Name.equals(o2Name)) {
return 0;
}
|
codereview_new_java_data_8144
|
void assertSquareBracketsInPath() {
assertThat(res2).isNotNull();
assertThat(res2.path()).isNotEqualTo("/#%2F:@!$&'()*+,;=?");
}
-
}
@ikhoon not sure about this, but kindly guide/let me know your view
void assertSquareBracketsInPath() {
assertThat(res2).isNotNull();
assertThat(res2.path()).isNotEqualTo("/#%2F:@!$&'()*+,;=?");
}
}
|
codereview_new_java_data_8145
|
private static PathAndQuery parse(@Nullable String rawPath, boolean allowDoubleD
@Test
void assertSquareBracketsInPath() {
- final PathAndQuery res = parse("/#/:@[]!$&'()*+,;=");
assertThat(res).isNotNull();
- assertThat(res.path()).isNotEqualTo("/#/:@!$&'()*+,;=");
final PathAndQuery res2 =
parse("/%23%2F%3A%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%3F");
assertThat(res2).isNotNull();
- assertThat(res2.path()).isNotEqualTo("/#%2F:@!$&'()*+,;=?");
}
}
How about showing the path instead so that reviewers can easily check the returned value?
`assertThat(res.path()).isEqualTo("/#/:@%5B%5D!$&'()*+,;=");`
private static PathAndQuery parse(@Nullable String rawPath, boolean allowDoubleD
@Test
void assertSquareBracketsInPath() {
+ final PathAndQuery res = parse("/#/:@[]!$&'()*+,;=");
assertThat(res).isNotNull();
+ assertThat(res.path()).isEqualTo("/#/:@%5B%5D!$&'()*+,;=");
final PathAndQuery res2 =
parse("/%23%2F%3A%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%3F");
assertThat(res2).isNotNull();
+ assertThat(res2.path()).isEqualTo("/#%2F:%5B%5D@!$&'()*+,;=?");
}
}
|
codereview_new_java_data_8146
|
private static PathAndQuery parse(@Nullable String rawPath, boolean allowDoubleD
@Test
void assertSquareBracketsInPath() {
- final PathAndQuery res = parse("/#/:@[]!$&'()*+,;=");
assertThat(res).isNotNull();
- assertThat(res.path()).isNotEqualTo("/#/:@!$&'()*+,;=");
final PathAndQuery res2 =
parse("/%23%2F%3A%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%3F");
assertThat(res2).isNotNull();
- assertThat(res2.path()).isNotEqualTo("/#%2F:@!$&'()*+,;=?");
}
}
What is this for? Could you explain this test, please?
private static PathAndQuery parse(@Nullable String rawPath, boolean allowDoubleD
@Test
void assertSquareBracketsInPath() {
+ final PathAndQuery res = parse("/#/:@[]!$&'()*+,;=");
assertThat(res).isNotNull();
+ assertThat(res.path()).isEqualTo("/#/:@%5B%5D!$&'()*+,;=");
final PathAndQuery res2 =
parse("/%23%2F%3A%5B%5D%40%21%24%26%27%28%29%2A%2B%2C%3B%3D%3F");
assertThat(res2).isNotNull();
+ assertThat(res2.path()).isEqualTo("/#%2F:%5B%5D@!$&'()*+,;=?");
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.