| id | content |
|---|---|
codereview_new_java_data_12105
|
public void close() throws Exception {
}
}
-class MemeoryMetadataStoreProvider implements MetadataStoreProvider {
@Override
public String urlScheme() {
Spelling mistake in the class name: `Memeory` should be `Memory`.
public void close() throws Exception {
}
}
+class MemoryMetadataStoreProvider implements MetadataStoreProvider {
@Override
public String urlScheme() {
|
codereview_new_java_data_12106
|
public class WebSocketProxyConfiguration implements PulsarConfiguration {
@FieldContext(doc = "Timeout of idling WebSocket session (in milliseconds)")
private int webSocketSessionIdleTimeoutMillis = 300000;
- @FieldContext(doc = "Interval of time to sending the ping to keep alive")
private int webSocketPingDurationSeconds = -1;
@FieldContext(doc = "When this parameter is not empty, unauthenticated users perform as anonymousUserRole")
1. Is there a minimum limit when this configuration is > 0?
2. If `webSocketPingDurationSeconds`<=0 means disabled, does it need to be stated in the doc?
public class WebSocketProxyConfiguration implements PulsarConfiguration {
@FieldContext(doc = "Timeout of idling WebSocket session (in milliseconds)")
private int webSocketSessionIdleTimeoutMillis = 300000;
+ @FieldContext(doc = "Interval of time to sending the ping to keep alive. This value greater than 0 means enabled")
private int webSocketPingDurationSeconds = -1;
@FieldContext(doc = "When this parameter is not empty, unauthenticated users perform as anonymousUserRole")
|
codereview_new_java_data_12107
|
public CompletableFuture<Optional<Topic>> getTopic(final TopicName topicName, bo
CompletableFuture<Optional<Topic>> res = createNonPersistentTopic(name);
- CompletableFuture eventFuture = topicEventsDispatcher
.notifyOnCompletion(res, topicName.toString(), TopicEvent.CREATE);
topicEventsDispatcher
.notifyOnCompletion(eventFuture, topicName.toString(), TopicEvent.LOAD);
```suggestion
CompletableFuture<Optional<Topic>> eventFuture = topicEventsDispatcher
```
public CompletableFuture<Optional<Topic>> getTopic(final TopicName topicName, bo
CompletableFuture<Optional<Topic>> res = createNonPersistentTopic(name);
+ CompletableFuture<Optional<Topic>> eventFuture = topicEventsDispatcher
.notifyOnCompletion(res, topicName.toString(), TopicEvent.CREATE);
topicEventsDispatcher
.notifyOnCompletion(eventFuture, topicName.toString(), TopicEvent.LOAD);
|
codereview_new_java_data_12108
|
private void internalReadFromLedger(ReadHandle ledger, OpReadEntry opReadEntry)
if (firstValidEntry == -1L) {
firstValidEntry = entryId;
}
- }
- if (firstValidEntry != -1L) {
lastValidEntry = entryId;
}
}
It looks like this logic just works for the `else` branch. Can we merge it?
private void internalReadFromLedger(ReadHandle ledger, OpReadEntry opReadEntry)
if (firstValidEntry == -1L) {
firstValidEntry = entryId;
}
lastValidEntry = entryId;
}
}
|
codereview_new_java_data_12109
|
public void testMessageRedeliveryWhenTimeoutInListener() throws Exception {
BlockingQueue<Message<byte[]>> receivedMsgs = new LinkedBlockingQueue<>();
MessageListener<byte[]> listener = (consumer, msg) -> {
try {
- // the first msg will wait until timeout
if (index.getAndDecrement() == 0 && new String(msg.getData()).equals(redeliveryMsg)) {
- Thread.sleep(5000);
}
receivedMsgs.add(msg);
consumer.acknowledge(msg);
Maybe 3000 is enough.
public void testMessageRedeliveryWhenTimeoutInListener() throws Exception {
BlockingQueue<Message<byte[]>> receivedMsgs = new LinkedBlockingQueue<>();
MessageListener<byte[]> listener = (consumer, msg) -> {
try {
+ // the first "Hello-0" will wait until timeout
if (index.getAndDecrement() == 0 && new String(msg.getData()).equals(redeliveryMsg)) {
+ Thread.sleep(3000);
}
receivedMsgs.add(msg);
consumer.acknowledge(msg);
|
codereview_new_java_data_12110
|
private void handleReleaseEvent(String serviceUnit, ServiceUnitStateData data) {
private void handleSplitEvent(String serviceUnit, ServiceUnitStateData data) {
if (isTargetBroker(data.broker())) {
splitServiceUnit(serviceUnit, data)
- .whenComplete((__, e) -> {
- if (e != null) {
- // When has exception, change the bundle state back to Splitting -> Owned .
- pubAsync(serviceUnit, new ServiceUnitStateData(Owned, data.broker(), data.sourceBroker()));
- log(e, serviceUnit, data, null);
- }
-
- });
}
}
I am sorry. I thought about this again, and in fact, this revert `Splitting -> Owned` can cause an inconsistent state if the exception is thrown after the children partitions are updated in the metadata store or children bundles are created.
Let's keep the exception-handling logic simple (do not revert the state here). Currently, the ServiceUnitStateChannel recovery logic will automatically tombstone the ownerships of any in-flight state service units, which I think is enough.
private void handleReleaseEvent(String serviceUnit, ServiceUnitStateData data) {
private void handleSplitEvent(String serviceUnit, ServiceUnitStateData data) {
if (isTargetBroker(data.broker())) {
splitServiceUnit(serviceUnit, data)
+ .whenComplete((__, e) -> log(e, serviceUnit, data, null));
}
}
|
codereview_new_java_data_12111
|
void createNameSpace(String cluster, String publicTenant, NamespaceName ns) thro
try {
broker.getAdminClient().namespaces().createNamespace(ns.toString());
} catch (Exception e) {
- log.warn(e.getMessage());
}
}
}
```suggestion
log.warn("...", e);
```
Besides, if the default manner to start a standalone cluster falls back to this branch, how can a standalone cluster successfully create the namespace (with what extra configs)?
void createNameSpace(String cluster, String publicTenant, NamespaceName ns) thro
try {
broker.getAdminClient().namespaces().createNamespace(ns.toString());
} catch (Exception e) {
+ log.warn("Failed to create the default namespace {}: {}", ns, e.getMessage());
}
}
}
|
codereview_new_java_data_12112
|
public void testCreateSchemaAfterDeletion() throws Exception {
Student student = new Student();
student.setName("Tom Jerry");
student.setAge(30);
- student.setGpa(6);
student.setGpa(10);
producer.send(student);
Is there any reason to invoke `setGpa` twice?
public void testCreateSchemaAfterDeletion() throws Exception {
Student student = new Student();
student.setName("Tom Jerry");
student.setAge(30);
student.setGpa(10);
producer.send(student);
|
codereview_new_java_data_12308
|
public class ImagesServiceInfoImpl extends ServiceInfoImpl implements ImagesServ
@Override
public String getType() {
- return "ImagesService";
}
@Override
```suggestion
return "Images";
```
public class ImagesServiceInfoImpl extends ServiceInfoImpl implements ImagesServ
@Override
public String getType() {
+ return "Images";
}
@Override
|
codereview_new_java_data_12309
|
public String getType() {
return simpleName.substring(0, truncate);
} else {
// this default, while incorrect, has the greatest chance of
- // success acorss data directories
return getName().toUpperCase();
}
}
```suggestion
// success across data directories
```
public String getType() {
return simpleName.substring(0, truncate);
} else {
// this default, while incorrect, has the greatest chance of
+ // success across data directories
return getName().toUpperCase();
}
}
|
codereview_new_java_data_12310
|
public GeoJSONGetFeatureResponse(GeoServer gs, String format) {
/**
* Constructor to be used by subclasses.
*
- * @param outputFormat The well-known name of the format, not <code>null</code>
- * @param jsonp <code>true</code> if specified format uses JSONP
*/
protected GeoJSONGetFeatureResponse(GeoServer gs, String format, boolean jsonp) {
super(gs, format);
Prefer using `{@code ...}` instead of the deprecated `<code>...</code>` markup.
```suggestion
* @param outputFormat The well-known name of the format, not {@code null}
* @param jsonp {@code true} if specified format uses JSONP
```
public GeoJSONGetFeatureResponse(GeoServer gs, String format) {
/**
* Constructor to be used by subclasses.
*
+ * @param outputFormat The well-known name of the format, not {@code null}
+ * @param jsonp {@code true} if specified format uses JSONP
*/
protected GeoJSONGetFeatureResponse(GeoServer gs, String format, boolean jsonp) {
super(gs, format);
|
codereview_new_java_data_12311
|
private void setupNetCDFoutSettings(QName name) {
private void setupNetCDFoutSettings(QName name, boolean setNoData) {
CoverageInfo info = getCatalog().getCoverageByName(getLayerId(name));
- // // Set the Declared SRS
- // info.setSRS("EPSG:4326");
- // info.setProjectionPolicy(ProjectionPolicy.REPROJECT_TO_DECLARED);
-
String layerName = name.getLocalPart().toUpperCase();
boolean isPackedLayer = layerName.contains("PACKED");
boolean isCF = layerName.contains("CF");
Are these comments intentionally left here?
private void setupNetCDFoutSettings(QName name) {
private void setupNetCDFoutSettings(QName name, boolean setNoData) {
CoverageInfo info = getCatalog().getCoverageByName(getLayerId(name));
String layerName = name.getLocalPart().toUpperCase();
boolean isPackedLayer = layerName.contains("PACKED");
boolean isCF = layerName.contains("CF");
|
codereview_new_java_data_12312
|
import org.geoserver.security.filter.GeoServerSecurityFilter;
import org.geoserver.security.oauth2.bearer.TokenValidator;
public class OpenIdConnectAuthenticationProvider extends GeoServerOAuthAuthenticationProvider {
TokenValidator bearerTokenValidator;
```suggestion
/**
* AuthenticationProvider for OpenId Connect.
*/
public class OpenIdConnectAuthenticationProvider extends GeoServerOAuthAuthenticationProvider {
```
import org.geoserver.security.filter.GeoServerSecurityFilter;
import org.geoserver.security.oauth2.bearer.TokenValidator;
+/**
+ * AuthenticationProvider for OpenId Connect.
+ */
public class OpenIdConnectAuthenticationProvider extends GeoServerOAuthAuthenticationProvider {
TokenValidator bearerTokenValidator;
|
codereview_new_java_data_12313
|
public class OpenIdConnectFilterConfig extends GeoServerOAuth2FilterConfig {
String tokenRolesClaim;
String responseMode;
boolean sendClientSecret = false;
- boolean allowBearerTokens = false;
/** Supports extraction of roles among the token claims */
public static enum OpenIdRoleSource implements RoleSource {
Since bearer authentication was already working, albeit without token validation, maybe this flag could be kept as `true` by default.
public class OpenIdConnectFilterConfig extends GeoServerOAuth2FilterConfig {
String tokenRolesClaim;
String responseMode;
boolean sendClientSecret = false;
+ boolean allowBearerTokens = true;
/** Supports extraction of roles among the token claims */
public static enum OpenIdRoleSource implements RoleSource {
|
codereview_new_java_data_12314
|
static ResourceAccessManager lookupResourceAccessManager() throws Exception {
}
// should never happen,just in case we have multiple singleton beans
// of type DefaultResourceAccessManager
- if (manager == null) manager = managers.get(size - 1);
CatalogFilterAccessManager lwManager = new CatalogFilterAccessManager();
lwManager.setDelegate(manager);
Maybe better to use `managers.get(0)`; if multiple managers are registered, the first one should be the one with the highest priority (assuming the `ExtensionPriority` interface has been used).
static ResourceAccessManager lookupResourceAccessManager() throws Exception {
}
// should never happen,just in case we have multiple singleton beans
// of type DefaultResourceAccessManager
+ if (manager == null) manager = managers.get(0);
CatalogFilterAccessManager lwManager = new CatalogFilterAccessManager();
lwManager.setDelegate(manager);
|
codereview_new_java_data_12315
|
public void disAssociateRoleFromGroup(GeoServerRole role, String groupname) thro
roles.remove(role);
setModified(true);
}
- if (helper.group_roleMap.get(groupname) != null && roles != null && roles.size() == 0) {
helper.group_roleMap.remove(groupname);
}
}
Surprised the PMD rule on the usage of `isEmpty` did not catch this one, but to be consistent with the rest of the code:
```suggestion
if (helper.group_roleMap.get(groupname) != null && roles != null && roles.isEmpty()) {
```
public void disAssociateRoleFromGroup(GeoServerRole role, String groupname) thro
roles.remove(role);
setModified(true);
}
+ if (helper.group_roleMap.get(groupname) != null && roles != null && roles.isEmpty()) {
helper.group_roleMap.remove(groupname);
}
}
|
codereview_new_java_data_12389
|
private static String maybeTransformAbsoluteUri(
throw newInvalidPathException(path);
}
- if (path.equals(newPath) || !isValidHttp2Path(newPath)) {
throw newInvalidPathException(path);
}
minor question) Is it possible to just check `isValidHttp2Path`?
I'm thinking about the worst case where `String` isn't stored in the constant pool and each character has to be compared. I'm wondering if just doing the `isValidHttp2Path` check is faster/simpler.
```suggestion
if (!isValidHttp2Path(newPath)) {
```
private static String maybeTransformAbsoluteUri(
throw newInvalidPathException(path);
}
+ if (!isValidHttp2Path(newPath)) {
throw newInvalidPathException(path);
}
|
codereview_new_java_data_12390
|
import org.slf4j.LoggerFactory;
import com.linecorp.armeria.common.annotation.Nullable;
-import com.linecorp.armeria.common.annotation.UnstableApi;
import io.netty.handler.codec.dns.DnsRecord;
/**
* A default implementation of {@link DnsQuestionListener} interface.
*/
-@UnstableApi
final class DefaultDnsQuestionListener implements DnsQuestionListener {
static final DnsQuestionListener DEFAULT_INSTANCE = new DefaultDnsQuestionListener();
We don't use `@UnstableApi` for non-public classes.
```suggestion
```
import org.slf4j.LoggerFactory;
import com.linecorp.armeria.common.annotation.Nullable;
import io.netty.handler.codec.dns.DnsRecord;
/**
* A default implementation of {@link DnsQuestionListener} interface.
*/
final class DefaultDnsQuestionListener implements DnsQuestionListener {
static final DnsQuestionListener DEFAULT_INSTANCE = new DefaultDnsQuestionListener();
|
codereview_new_java_data_12391
|
/**
* A default implementation of {@link DnsQueryListener} interface.
*/
-final class DefaultDnsQueryListener implements DnsQueryListener {
- static final DnsQueryListener DEFAULT_INSTANCE = new DefaultDnsQueryListener();
private final Logger logger = LoggerFactory.getLogger(getClass());
nit: We could use the enum singleton pattern?
/**
* A default implementation of {@link DnsQueryListener} interface.
*/
+enum DefaultDnsQueryListener implements DnsQueryListener {
+ DEFAULT_INSTANCE;
private final Logger logger = LoggerFactory.getLogger(getClass());
|
codereview_new_java_data_12392
|
public String path() {
* Returns a wrapped {@link RoutingContext} which holds the specified {@code path}.
* It is usually used to find an {@link HttpService} with a prefix-stripped path.
*
- * @deprecated Use {@link #withPath} instead.
*/
@Deprecated
default RoutingContext overridePath(String path) {
- requireNonNull(path, "path");
- return new RoutingContextWrapper(this) {
- @Override
- public String path() {
- return path;
- }
- };
}
/**
Could delegate to `withPath()` rather than just copying and pasting
public String path() {
* Returns a wrapped {@link RoutingContext} which holds the specified {@code path}.
* It is usually used to find an {@link HttpService} with a prefix-stripped path.
*
+ * @deprecated Use {@link #withPath}.
*/
@Deprecated
default RoutingContext overridePath(String path) {
+ return withPath(path);
}
/**
|
codereview_new_java_data_12393
|
static <T> StreamMessage<T> aborted(Throwable cause) {
}
/**
- * Creates a new {@link StreamMessageWriter} that produces the objects to be published
- * by {@link StreamMessage}.
*/
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
https://github.com/line/armeria/blob/bc48105148d652e2e48e788c94bec007882c31dc/core/src/main/java/com/linecorp/armeria/common/stream/StreamWriter.java#L31
I borrowed the description from `StreamWriter`. If you have a better comment, please share it with me~ I'll update it!
static <T> StreamMessage<T> aborted(Throwable cause) {
}
/**
+ * Creates a new {@link StreamMessageWriter} that publishes the objects written via
+ * {@link StreamWriter#write(Object)}.
*/
static <T> StreamMessageWriter<T> streaming() {
return new DefaultStreamMessage<>();
|
codereview_new_java_data_12394
|
public ServerBuilder setHeaders(
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @deprecated this method is replaced by
- * {@link #requestIdGenerator(Function<? super RoutingContext, ? extends RequestId>)}
*/
@Deprecated
public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
```suggestion
* @deprecated Use
```
public ServerBuilder setHeaders(
* By default, a {@link RequestId} is generated from a random 64-bit integer.
*
* @deprecated this method is replaced by
+ * {@link #requestIdGenerator(Function)}
*/
@Deprecated
public ServerBuilder requestIdGenerator(Supplier<? extends RequestId> requestIdSupplier) {
|
codereview_new_java_data_12395
|
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
- checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
nit:
```suggestion
//noinspection unchecked
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
```
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
+ checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with a non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
|
codereview_new_java_data_12396
|
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
- checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
Is this field accessed by two threads?
private volatile boolean callClosed;
DeferredListener(ServerCall<I, ?> serverCall, CompletableFuture<ServerCall.Listener<I>> listenerFuture) {
+ checkState(serverCall instanceof AbstractServerCall, "Cannot use %s with a non-Armeria gRPC server",
AsyncServerInterceptor.class.getName());
final AbstractServerCall<I, ?> armeriaServerCall = (AbstractServerCall<I, ?>) serverCall;
|
codereview_new_java_data_12397
|
private static Map<String, Field> buildFields(Descriptor desc,
throw new RecursiveTypeException(field.getMessageType());
}
- @Nullable
- Descriptor typeDesc =
- desc.getNestedTypes().stream()
- .filter(d -> d.getFullName().equals(field.getMessageType().getFullName()))
- .findFirst().orElse(null);
- if (typeDesc == null) {
- typeDesc = field.getMessageType();
- }
- checkState(typeDesc != null,
- "Descriptor for the type '%s' does not exist.",
- field.getMessageType().getFullName());
try {
builder.putAll(buildFields(typeDesc,
ImmutableList.<String>builder()
Should we also remove `desc.getNestedTypes().stream()...` block?
It seems we can directly assign `typeDesc` from `field.getMessageType()` without `@Nullable`.
private static Map<String, Field> buildFields(Descriptor desc,
throw new RecursiveTypeException(field.getMessageType());
}
+ final Descriptor typeDesc = field.getMessageType();
try {
builder.putAll(buildFields(typeDesc,
ImmutableList.<String>builder()
|
codereview_new_java_data_12398
|
void error_withMessage(UnitTestServiceBlockingStub blockingClient) throws Except
assertThat(grpcStatus).isNotNull();
assertThat(grpcStatus.getCode()).isEqualTo(Code.ABORTED);
assertThat(grpcStatus.getDescription()).isEqualTo("aborted call");
- final StatusException ex = (StatusException)rpcRes.cause();
assertThat(ex.getStatus().getCode()).isEqualTo(Code.ABORTED);
assertThat(ex.getStatus().getDescription()).isEqualTo("aborted call");
assertThat(ex.getTrailers().getAll(STRING_VALUE_KEY))
nit: Code style
```suggestion
final StatusException ex = (StatusException) rpcRes.cause();
```
void error_withMessage(UnitTestServiceBlockingStub blockingClient) throws Except
assertThat(grpcStatus).isNotNull();
assertThat(grpcStatus.getCode()).isEqualTo(Code.ABORTED);
assertThat(grpcStatus.getDescription()).isEqualTo("aborted call");
+ final StatusException ex = (StatusException) rpcRes.cause();
assertThat(ex.getStatus().getCode()).isEqualTo(Code.ABORTED);
assertThat(ex.getStatus().getDescription()).isEqualTo("aborted call");
assertThat(ex.getTrailers().getAll(STRING_VALUE_KEY))
|
codereview_new_java_data_12399
|
private void close(Status status, Metadata metadata) {
if (status.isOk()) {
req.abort();
} else {
- req.abort(status.asRuntimeException(metadata));
}
if (upstream != null) {
upstream.cancel();
We also need to use `StatusException` here instead of `StatusRuntimeException`.
https://github.com/grpc/grpc-java/issues/4683
private void close(Status status, Metadata metadata) {
if (status.isOk()) {
req.abort();
} else {
+ req.abort(status.asException(metadata));
}
if (upstream != null) {
upstream.cancel();
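For context, a minimal sketch of the two gRPC factory methods involved; `StatusExceptionDemo` is a hypothetical name, but both `asException(Metadata)` and `asRuntimeException(Metadata)` exist on `io.grpc.Status`:
```java
import io.grpc.Metadata;
import io.grpc.Status;
import io.grpc.StatusException;
import io.grpc.StatusRuntimeException;

final class StatusExceptionDemo {
    static void demo(Metadata trailers) {
        // Checked variant requested by the review above.
        StatusException checked = Status.ABORTED.asException(trailers);
        // Unchecked variant that the original code used.
        StatusRuntimeException unchecked = Status.ABORTED.asRuntimeException(trailers);
        // Both carry the trailers passed in.
        assert checked.getTrailers() == trailers;
        assert unchecked.getTrailers() == trailers;
    }
}
```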
|
codereview_new_java_data_12400
|
protected HttpData fetchDecoderOutput() {
}
private void maybeCheckOverflow(@Nullable ByteBuf decoded, ByteBuf newBuf) {
- if (maxLength <= 0) {
return;
}
How about also returning if it's `Integer.MAX_VALUE`?
protected HttpData fetchDecoderOutput() {
}
private void maybeCheckOverflow(@Nullable ByteBuf decoded, ByteBuf newBuf) {
+ if (maxLength <= 0 || maxLength == Integer.MAX_VALUE) {
return;
}
|
codereview_new_java_data_12401
|
static final class ExceptionOverview extends RuntimeException {
private static final long serialVersionUID = 3875212506787802066L;
- private static final ReentrantLock reentrantLock = new ReentrantLock();
-
ExceptionOverview(String message) {
super(message);
}
@Override
public Throwable fillInStackTrace() {
- reentrantLock.lock();
- try {
- return this;
- } finally {
- reentrantLock.unlock();
- }
}
}
I think we don't need a lock for this.
static final class ExceptionOverview extends RuntimeException {
private static final long serialVersionUID = 3875212506787802066L;
ExceptionOverview(String message) {
super(message);
}
@Override
public Throwable fillInStackTrace() {
+ return this;
}
}
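For readers unfamiliar with the pattern, a minimal sketch of why this works lock-free (a hypothetical class, not Armeria's): the override only returns `this` and touches no shared mutable state, so there is nothing to synchronize, and skipping the stack walk also avoids the main cost of constructing an exception.
```java
final class LightweightException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    LightweightException(String message) {
        super(message);
    }

    @Override
    public Throwable fillInStackTrace() {
        // No state is read or written; returning `this` skips capturing
        // the stack trace, so no lock is needed.
        return this;
    }
}
```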
|
codereview_new_java_data_12402
|
import io.netty.util.AttributeMap;
/**
- * A {@link ConnectionPoolListener} to count the number of connections which has been open and closed.
*/
public final class CountingConnectionPoolListener implements ConnectionPoolListener {
since connections is plural
```suggestion
* A {@link ConnectionPoolListener} to count the number of connections which have been open and closed.
```
import io.netty.util.AttributeMap;
/**
+ * A {@link ConnectionPoolListener} to count the number of connections which have been open and closed.
*/
public final class CountingConnectionPoolListener implements ConnectionPoolListener {
|
codereview_new_java_data_12403
|
protected final void onConnectionError(ChannelHandlerContext ctx, boolean outbou
handlingConnectionError = true;
if (Exceptions.isExpected(cause) || isGoAwaySentException(cause, connection())) {
- logger.debug("{} HTTP/2 connection error:", ctx.channel(), cause);
} else {
logger.warn("{} HTTP/2 connection error:", ctx.channel(), cause);
}
As you know, the error is safely ignored. How about setting the level to `trace` to log the `cause` only when users really want to delve into something?
protected final void onConnectionError(ChannelHandlerContext ctx, boolean outbou
handlingConnectionError = true;
if (Exceptions.isExpected(cause) || isGoAwaySentException(cause, connection())) {
+ logger.trace("{} HTTP/2 connection error:", ctx.channel(), cause);
} else {
logger.warn("{} HTTP/2 connection error:", ctx.channel(), cause);
}
|
codereview_new_java_data_12404
|
public String toString() {
if (!isEndOfStream()) {
return toString;
}
- return "{EOS}, " + toString;
}
}
nit: place EOS after `toString`? Users might be less interested in EOS than the actual content.
public String toString() {
if (!isEndOfStream()) {
return toString;
}
+ return toString + ", {EOS}";
}
}
|
codereview_new_java_data_12405
|
import java.lang.annotation.Target;
import java.util.concurrent.TimeUnit;
import com.linecorp.armeria.server.annotation.DecoratorFactory;
/**
- * Annotation for request timeout
*/
@DecoratorFactory(RequestTimeoutDecoratorFunction.class)
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.METHOD })
public @interface RequestTimeout {
/**
- * Value of request timeout to set
*/
long value();
/**
- * Time unit of request timeout to set
*/
- TimeUnit unit();
}
should this be named RequestTimeoutDecorator, as other decorating annotations are named?
import java.lang.annotation.Target;
import java.util.concurrent.TimeUnit;
+import com.linecorp.armeria.common.util.TimeoutMode;
import com.linecorp.armeria.server.annotation.DecoratorFactory;
/**
+ * Annotation for request timeout.
*/
@DecoratorFactory(RequestTimeoutDecoratorFunction.class)
@Retention(RetentionPolicy.RUNTIME)
@Target({ ElementType.TYPE, ElementType.METHOD })
public @interface RequestTimeout {
/**
+ * Value of request timeout to set.
*/
long value();
/**
+ * Time unit of request timeout to set.
*/
+ TimeUnit unit() default TimeUnit.MILLISECONDS;
+
+ /**
+ * Timeout mode of request timeout to set.
+ */
+ TimeoutMode timeoutMode() default TimeoutMode.SET_FROM_START;
}
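A hypothetical usage sketch of the annotation above on an Armeria annotated service (the service class and path are made up; `@Get` is Armeria's routing annotation):
```java
import java.util.concurrent.TimeUnit;

import com.linecorp.armeria.server.annotation.Get;

class SlowService {
    // unit defaults to MILLISECONDS and timeoutMode to SET_FROM_START,
    // so only the value is strictly required.
    @RequestTimeout(value = 5, unit = TimeUnit.SECONDS)
    @Get("/slow")
    public String slow() {
        return "done";
    }
}
```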
|
codereview_new_java_data_12406
|
public String timeoutSeconds(ServiceRequestContext ctx, HttpRequest req) {
AnnotatedServiceTest.validateContextAndRequest(ctx, req);
return Long.toString(ctx.requestTimeoutMillis());
}
-
- @Get("/subscriberIsInitialized")
- public String subscriberIsInitialized(ServiceRequestContext ctx, HttpRequest req) {
- AnnotatedServiceTest.validateContextAndRequest(ctx, req);
- final boolean isInitialized = ((DefaultServiceRequestContext) ctx)
- .requestCancellationScheduler().isInitialized();
- return Boolean.toString(isInitialized);
- }
}
@Test
void testRequestTimeoutSet() {
- final BlockingWebClient client = BlockingWebClient.of(server.httpUri());
AggregatedHttpResponse response;
Could be removed?
public String timeoutSeconds(ServiceRequestContext ctx, HttpRequest req) {
AnnotatedServiceTest.validateContextAndRequest(ctx, req);
return Long.toString(ctx.requestTimeoutMillis());
}
}
@Test
void testRequestTimeoutSet() {
+ final BlockingWebClient client = server.blockingWebClient();
AggregatedHttpResponse response;
|
codereview_new_java_data_12407
|
public void subscribe(Subscriber<? super HttpObject> subscriber, EventExecutor e
SubscriptionOption... options) {
requireNonNull(subscriber, "subscriber");
requireNonNull(executor, "executor");
- executor.execute(() -> {
- subscriber.onSubscribe(NoopSubscription.get());
- subscriber.onComplete();
- completionFuture.complete(null);
- });
}
@Override
- Do we need to reschedule even when it is in the event loop?
- Don't we need to abort a late subscriber?
public void subscribe(Subscriber<? super HttpObject> subscriber, EventExecutor e
SubscriptionOption... options) {
requireNonNull(subscriber, "subscriber");
requireNonNull(executor, "executor");
+ if (executor.inEventLoop()) {
+ subscribe0(subscriber);
+ } else {
+ executor.execute(() -> subscribe0(subscriber));
+ }
+ }
+
+ private void subscribe0(Subscriber<? super HttpObject> subscriber) {
+ subscriber.onSubscribe(NoopSubscription.get());
+ subscriber.onComplete();
+ completionFuture.complete(null);
}
@Override
|
codereview_new_java_data_12408
|
import graphql.GraphQLError;
/**
- * Error handler which map a GraphQL errors to an {@link HttpResponse}.
*/
@FunctionalInterface
public interface GraphqlErrorsHandler {
```suggestion
* A handler that maps GraphQL errors to an {@link HttpResponse}.
```
import graphql.GraphQLError;
/**
+ * A handler that maps GraphQL errors to an {@link HttpResponse}.
*/
@FunctionalInterface
public interface GraphqlErrorsHandler {
|
codereview_new_java_data_12409
|
protected void configure(ServerBuilder sb) throws Exception {
final File graphqlSchemaFile =
new File(getClass().getResource("/test.graphqls").toURI());
- final GraphqlErrorHandler errorsHandler
= (ctx, input, result, negotiatedProduceType, cause) -> {
final List<GraphQLError> errors = result.getErrors();
if (errors.stream().map(GraphQLError::getMessage).anyMatch(m -> m.endsWith("foo"))) {
```suggestion
final GraphqlErrorHandler errorHandler
```
protected void configure(ServerBuilder sb) throws Exception {
final File graphqlSchemaFile =
new File(getClass().getResource("/test.graphqls").toURI());
+ final GraphqlErrorHandler errorHandler
= (ctx, input, result, negotiatedProduceType, cause) -> {
final List<GraphQLError> errors = result.getErrors();
if (errors.stream().map(GraphQLError::getMessage).anyMatch(m -> m.endsWith("foo"))) {
|
codereview_new_java_data_12410
|
public boolean isOpen() {
@Override
public boolean isEmpty() {
return false;
}
Should it be `true` if `parts` is an `EmptyStreamMessage`?
public boolean isOpen() {
@Override
public boolean isEmpty() {
+ // This is always false even when parts.isEmpty() == true.
+ // It's because isEmpty() is called after this multipart is converted into a StreamMessage and the
+ // StreamMessage produces at least a closing boundary.
return false;
}
|
codereview_new_java_data_12411
|
static HttpJsonTranscodingOptions of() {
}
/**
- * Returns the {@link HttpJsonTranscodingQueryParamNaming}s which is used to match fields in a
* {@link Message} with query parameters.
*/
- Set<HttpJsonTranscodingQueryParamNaming> queryParamNamings();
/**
* Return the {@link UnframedGrpcErrorHandler} which handles an exception raised while serving a gRPC
super minor nit;
Not too strong on this, but wondering if `naming` makes it hard to guess what this method does at first glance.
What do you think of `queryParamMatchRule`?
static HttpJsonTranscodingOptions of() {
}
/**
+ * Returns the {@link HttpJsonTranscodingQueryParamMatchRule}s which are used to match fields in a
* {@link Message} with query parameters.
*/
+ Set<HttpJsonTranscodingQueryParamMatchRule> queryParamMatchRules();
/**
* Return the {@link UnframedGrpcErrorHandler} which handles an exception raised while serving a gRPC
|
codereview_new_java_data_12412
|
private void respond(ChannelHandlerContext ctx, ServiceRequestContext reqCtx,
respond(reqCtx, false, resHeaders, resContent, cause).addListener(CLOSE);
}
-// if (!isReading) {
ctx.flush();
-// }
}
private ChannelFuture respond(ServiceRequestContext reqCtx, boolean addKeepAlive,
seems like we need to revert this
private void respond(ChannelHandlerContext ctx, ServiceRequestContext reqCtx,
respond(reqCtx, false, resHeaders, resContent, cause).addListener(CLOSE);
}
+ if (!isReading) {
ctx.flush();
+ }
}
private ChannelFuture respond(ServiceRequestContext reqCtx, boolean addKeepAlive,
|
codereview_new_java_data_12413
|
void writeAfterAborted() {
.peek(x -> {
if (x == 13) {
streamMessage.abort();
- await().untilAsserted(() -> assertThat(streamMessage.isOpen()).isFalse());
}
})
.map(ByteStreamMessageOutputStreamTest::httpData);
A `FixedStreamMessage` is always closed because all elements are given when it is created.
https://github.com/line/armeria/blob/871d87297e4d051241589cb1ae95641cbc83f880/core/src/main/java/com/linecorp/armeria/internal/common/stream/FixedStreamMessage.java#L87
If you want to know whether all elements of the stream are drained by `abort()`, you can use `.whenComplete().join()` and check the raised exception.
void writeAfterAborted() {
.peek(x -> {
if (x == 13) {
streamMessage.abort();
+ streamMessage.whenComplete().join();
}
})
.map(ByteStreamMessageOutputStreamTest::httpData);
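A hedged sketch of the reviewer's alternative check (assuming AssertJ, which the test already uses, and Armeria's default behavior that a cause-less `abort()` fails the stream with an `AbortedStreamException`):
```java
import static org.assertj.core.api.Assertions.assertThatThrownBy;

import java.util.concurrent.CompletionException;

import com.linecorp.armeria.common.stream.AbortedStreamException;
import com.linecorp.armeria.common.stream.StreamMessage;

final class AbortAssertion {
    static void assertAborted(StreamMessage<?> streamMessage) {
        streamMessage.abort();
        // whenComplete() completes exceptionally once the abort is processed,
        // so join() surfaces the cause wrapped in a CompletionException.
        assertThatThrownBy(() -> streamMessage.whenComplete().join())
                .isInstanceOf(CompletionException.class)
                .hasCauseInstanceOf(AbortedStreamException.class);
    }
}
```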
|
codereview_new_java_data_12414
|
void onRemoval(DnsQuestion question, @Nullable List<DnsRecord> records,
/**
* Invoked when an eviction occurred for the {@link DnsRecord}s. The eviction may occur due to exceeding
- * a maximum size or timed expiration. The cause may vary according to the
* {@link DnsCacheBuilder#cacheSpec(String)} of the {@link DnsCache}.
*
* @param question the DNS question.
```suggestion
* a maximum size or timed expiration. The cause may vary depending on the
```
void onRemoval(DnsQuestion question, @Nullable List<DnsRecord> records,
/**
* Invoked when an eviction occurred for the {@link DnsRecord}s. The eviction may occur due to exceeding
+ * a maximum size or timed expiration. The cause may vary depending on the
* {@link DnsCacheBuilder#cacheSpec(String)} of the {@link DnsCache}.
*
* @param question the DNS question.
|
codereview_new_java_data_12415
|
public static List<AnnotatedServiceElement> find(
private static HttpStatus defaultResponseStatus(Method method, Class<?> clazz) {
final StatusCode statusCodeAnnotation = AnnotationUtil.findFirst(method, StatusCode.class);
if (statusCodeAnnotation != null) {
- final int statusCode = statusCodeAnnotation.value();
- checkArgument(statusCode >= 0,
- "invalid HTTP status code: %s (expected: >= 0)", statusCode);
- return HttpStatus.valueOf(statusCode);
}
if (!producibleMediaTypes(method, clazz).isEmpty()) {
Not related to this PR, but we can remove this validation because `HttpStatus.valueOf()` throws an IAE if the status is negative.
```suggestion
```
public static List<AnnotatedServiceElement> find(
private static HttpStatus defaultResponseStatus(Method method, Class<?> clazz) {
final StatusCode statusCodeAnnotation = AnnotationUtil.findFirst(method, StatusCode.class);
if (statusCodeAnnotation != null) {
+ return HttpStatus.valueOf(statusCodeAnnotation.value());
}
if (!producibleMediaTypes(method, clazz).isEmpty()) {
|
codereview_new_java_data_12416
|
private void cleanup() {
}
if (!unfinishedRequests.isEmpty()) {
- final boolean cancel;
- final Exception cause;
- if (protocol.isMultiplex()) {
- // An HTTP2 request is cancelled by Http2RequestDecoder.onRstStreamRead()
- cancel = false;
- cause = ClosedStreamException.get();
- } else {
- cancel = true;
- cause = ClosedSessionException.get();
- }
unfinishedRequests.forEach((req, res) -> {
// Mark the request stream as closed due to disconnection.
req.abortResponse(cause, cancel);
});
This method is called when a channel becomes inactive. So should we revert this?
private void cleanup() {
}
if (!unfinishedRequests.isEmpty()) {
+ final ClosedSessionException cause = ClosedSessionException.get();
unfinishedRequests.forEach((req, res) -> {
+ // An HTTP2 request is cancelled by Http2RequestDecoder.onRstStreamRead()
+ final boolean cancel = !protocol.isMultiplex();
// Mark the request stream as closed due to disconnection.
req.abortResponse(cause, cancel);
});
|
codereview_new_java_data_12417
|
abstract class AggregationSupport {
* Aggregates an {@link HttpMessage} into an {@link AggregatedHttpMessage} using
* the specified {@link AggregationOptions}.
*
- * <p>Note that this method is added for internal usage. Therefore, <strong>must not</strong> override or
* call this method if you are not familiar with Armeria's internal implementation.
*/
@UnstableApi
The subject seems to be missing
```suggestion
* <p>Note that this method is added for internal usage. Therefore, you <strong>must not</strong> override or
```
abstract class AggregationSupport {
* Aggregates an {@link HttpMessage} into an {@link AggregatedHttpMessage} using
* the specified {@link AggregationOptions}.
*
+ * <p>Note that this method is added for internal usage. Therefore, you <strong>must not</strong> override or
* call this method if you are not familiar with Armeria's internal implementation.
*/
@UnstableApi
|
codereview_new_java_data_12418
|
import io.netty.util.concurrent.EventExecutor;
-final class FuseableStreamMessage<T, U> extends AbstractStreamMessage<U> {
static <T> FuseableStreamMessage<T, T> of(StreamMessage<? extends T> source,
Predicate<? super T> predicate) {
Question) I guess there is no problem, but just wanted to know why we also modify these classes to extend `AbstractStreamMessage`. From my understanding, these classes won't be supporting `aggregate(AggregationOptions)`. Is my understanding correct? (Since they don't extend `Http[Request|Response]`.)
import io.netty.util.concurrent.EventExecutor;
+final class FuseableStreamMessage<T, U> implements StreamMessage<U> {
static <T> FuseableStreamMessage<T, T> of(StreamMessage<? extends T> source,
Predicate<? super T> predicate) {
|
codereview_new_java_data_12419
|
default HttpRequest withHeaders(RequestHeadersBuilder newHeadersBuilder) {
/**
* Aggregates this request with the specified {@link AggregationOptions}. The returned
* {@link CompletableFuture} will be notified when the content and the trailers of the request are
- * received fully.
* <pre>{@code
* AggregationOptions options =
* AggregationOptions.builder()
```suggestion
* fully received.
```
default HttpRequest withHeaders(RequestHeadersBuilder newHeadersBuilder) {
/**
* Aggregates this request with the specified {@link AggregationOptions}. The returned
* {@link CompletableFuture} will be notified when the content and the trailers of the request are
+ * fully received.
* <pre>{@code
* AggregationOptions options =
* AggregationOptions.builder()
|
codereview_new_java_data_12420
|
static void deframeAndRespond(ServiceRequestContext ctx,
}
final MediaType grpcMediaType = grpcResponse.contentType();
- requireNonNull(grpcMediaType);
final ResponseHeadersBuilder unframedHeaders = grpcResponse.headers().toBuilder();
unframedHeaders.set(GrpcHeaderNames.GRPC_STATUS, grpcStatusCode); // grpcStatusCode is 0 which is OK.
if (responseContentType != null) {
We shouldn't throw exceptions in this method since the exception isn't propagated to the response.
```
res.completeExceptionally(new NullPointerException("MediaType is undefined"));
return;
```
static void deframeAndRespond(ServiceRequestContext ctx,
}
final MediaType grpcMediaType = grpcResponse.contentType();
+ if (grpcMediaType == null) {
+ PooledObjects.close(grpcResponse.content());
+ res.completeExceptionally(new NullPointerException("MediaType is undefined"));
+ return;
+ }
+
final ResponseHeadersBuilder unframedHeaders = grpcResponse.headers().toBuilder();
unframedHeaders.set(GrpcHeaderNames.GRPC_STATUS, grpcStatusCode); // grpcStatusCode is 0 which is OK.
if (responseContentType != null) {
|
codereview_new_java_data_12421
|
void shouldClosePooledObjectsForNonOK() {
void shouldClosePooledObjectsForMissingGrpcStatus() {
final CompletableFuture<HttpResponse> res = new CompletableFuture<>();
final ByteBuf byteBuf = Unpooled.buffer();
- final ResponseHeaders responseHeaders = ResponseHeaders.builder(HttpStatus.OK)
- .build();
final AggregatedHttpResponse framedResponse = AggregatedHttpResponse.of(responseHeaders,
HttpData.wrap(byteBuf));
UnframedGrpcService.deframeAndRespond(ctx, framedResponse, res, UnframedGrpcErrorHandler.of(), null);
```suggestion
final ResponseHeaders responseHeaders = ResponseHeaders.of(HttpStatus.OK);
```
void shouldClosePooledObjectsForNonOK() {
void shouldClosePooledObjectsForMissingGrpcStatus() {
final CompletableFuture<HttpResponse> res = new CompletableFuture<>();
final ByteBuf byteBuf = Unpooled.buffer();
+ final ResponseHeaders responseHeaders = ResponseHeaders.of(HttpStatus.OK);
final AggregatedHttpResponse framedResponse = AggregatedHttpResponse.of(responseHeaders,
HttpData.wrap(byteBuf));
UnframedGrpcService.deframeAndRespond(ctx, framedResponse, res, UnframedGrpcErrorHandler.of(), null);
|
codereview_new_java_data_12422
|
void protobufStreamResponse(String path) {
final AggregatedHttpResponse response = client.get(path).aggregate().join();
assertThat(response.status()).isEqualTo(HttpStatus.INTERNAL_SERVER_ERROR);
assertThat(cause).isInstanceOf(IllegalStateException.class)
- .hasMessageContaining("cannot convert a");
}
@Test
Note for reviewers.
The exception used to be raised by `ProtobufResponseConverterFunction`: https://github.com/line/armeria/blob/master/protobuf/src/main/java/com/linecorp/armeria/server/protobuf/ProtobufResponseConverterFunction.java#L243-L244
The exception is now wrapped by the `CompositeResponseConverterFunction`.
https://github.com/line/armeria/blob/master/core/src/main/java/com/linecorp/armeria/internal/server/annotation/CompositeResponseConverterFunction.java#L71-L75
void protobufStreamResponse(String path) {
final AggregatedHttpResponse response = client.get(path).aggregate().join();
assertThat(response.status()).isEqualTo(HttpStatus.INTERNAL_SERVER_ERROR);
assertThat(cause).isInstanceOf(IllegalStateException.class)
+ .hasMessageContaining("cannot convert a result to HttpResponse");
+ assertThat(cause.getCause())
+ .isInstanceOf(IllegalArgumentException.class)
+ .hasMessageContaining("Cannot convert a")
+ .hasMessageContaining("to Protocol Buffers wire format");
}
@Test
|
codereview_new_java_data_12423
|
/**
* A {@link ResponseConverterFunction} provider interface which provides a
- * {@link ResponseConverterFunction} for converting an object of the given type.
*/
@UnstableApi
@FunctionalInterface
How about
```suggestion
* A {@link ResponseConverterFunction} provider interface which provides a
* {@link ResponseConverterFunction} that converts an object of the given type to an {@link HttpResponse}
* using the delegating {@link ResponseConverterFunction}.
* The delegating converter is a collection of several converters that you specify when
* {@linkplain ServerBuilder#annotatedService(Object, Object...) creating an annotated service} and
* Armeria default converters.
```
/**
* A {@link ResponseConverterFunction} provider interface which provides a
+ * {@link ResponseConverterFunction} that converts an object of the given type to an {@link HttpResponse}
+ * using the delegating {@link ResponseConverterFunction}.
+ * The delegating converter is a collection of several converters that you specify when
+ * {@linkplain ServerBuilder#annotatedService(Object, Object...) creating an annotated service} and
+ * Armeria default converters.
*/
@UnstableApi
@FunctionalInterface
|
codereview_new_java_data_12424
|
package com.linecorp.armeria.server.annotation;
/**
* Supported markup types in {@link Description}.
*/
public enum Markup {
NONE,
MARKDOWN,
Let's add `@UnstableApi`.
package com.linecorp.armeria.server.annotation;
+import com.linecorp.armeria.common.annotation.UnstableApi;
+
/**
* Supported markup types in {@link Description}.
*/
+@UnstableApi
public enum Markup {
NONE,
MARKDOWN,
|
codereview_new_java_data_12425
|
public interface DocServicePlugin {
// TODO(trustin): How do we specify the docstring of a method return value?
/**
- * Loads the documentation description infos include strings and markup types
* that describes services and their methods, enums and their values and
* structs/exceptions and their fields. The {@link Map} returned by this method will contain the
* documentation strings identified by the key strings that conforms to one of the following formats:
This sentence isn't grammatically correct. What do you think of just doing the following?
```suggestion
* Loads the {@link DescriptionInfo} that describes services and their methods, enums and their values and
```
public interface DocServicePlugin {
// TODO(trustin): How do we specify the docstring of a method return value?
/**
+ * Loads the {@link DescriptionInfo} that describes services and their methods, enums and their values and
* structs/exceptions and their fields. The {@link Map} returned by this method will contain the
* documentation strings identified by the key strings that conforms to one of the following formats:
|
codereview_new_java_data_12426
|
public interface NamedTypeInfo {
String name();
/**
- * Returns the description object. If not available, an null value is returned.
*/
@JsonProperty
@JsonInclude(Include.NON_NULL)
```suggestion
* Returns the description information. If not available, a null value is returned.
```
public interface NamedTypeInfo {
String name();
/**
+ * Returns the description information. If not available, a null value is returned.
*/
@JsonProperty
@JsonInclude(Include.NON_NULL)
|
codereview_new_java_data_12427
|
default StreamMessage<T> recoverAndResume(
* // In this case, CompletionException is returned. (can't recover exception)
* misMatchRecovered.collect().join();
* }</pre>
- * */
@UnstableApi
default <E extends Throwable> StreamMessage<T> recoverAndResume(Class<E> causeClass,
Function<? super E, ? extends StreamMessage<T>> function) {
```suggestion
*/
```
default StreamMessage<T> recoverAndResume(
* // In this case, CompletionException is returned. (can't recover exception)
* misMatchRecovered.collect().join();
* }</pre>
+ */
@UnstableApi
default <E extends Throwable> StreamMessage<T> recoverAndResume(Class<E> causeClass,
Function<? super E, ? extends StreamMessage<T>> function) {
|
codereview_new_java_data_12428
|
public void testNoBrokerAclAuthorizer() throws Exception {
when(describeAclsResult.values()).thenReturn(describeAclsFuture);
when(sourceAdmin.describeAcls(any())).thenReturn(describeAclsResult);
- try (LogCaptureAppender connectorLogs = LogCaptureAppender.createAndRegister(MirrorSourceConnector.class);) {
LogCaptureAppender.setClassLoggerToTrace(MirrorSourceConnector.class);
connector.syncTopicAcls();
long aclSyncDisableMessages = connectorLogs.getMessages().stream()
Nit: unnecessary semicolon
public void testNoBrokerAclAuthorizer() throws Exception {
when(describeAclsResult.values()).thenReturn(describeAclsFuture);
when(sourceAdmin.describeAcls(any())).thenReturn(describeAclsResult);
+ try (LogCaptureAppender connectorLogs = LogCaptureAppender.createAndRegister(MirrorSourceConnector.class)) {
LogCaptureAppender.setClassLoggerToTrace(MirrorSourceConnector.class);
connector.syncTopicAcls();
long aclSyncDisableMessages = connectorLogs.getMessages().stream()
|
codereview_new_java_data_12430
|
public boolean equals(Object o) {
public int hashCode() {
int result = fetchedData != null ? fetchedData.hashCode() : 0;
result = 31 * result + (divergingEpoch != null ? divergingEpoch.hashCode() : 0);
- result = 31 * result + (int) (highWatermark ^ (highWatermark >>> 32));
- result = 31 * result + (int) (logStartOffset ^ (logStartOffset >>> 32));
- result = 31 * result + (int) (logEndOffset ^ (logEndOffset >>> 32));
- result = 31 * result + (int) (lastStableOffset ^ (lastStableOffset >>> 32));
return result;
}
We should use `Long.hashCode` in many of these lines.
public boolean equals(Object o) {
public int hashCode() {
int result = fetchedData != null ? fetchedData.hashCode() : 0;
result = 31 * result + (divergingEpoch != null ? divergingEpoch.hashCode() : 0);
+ result = 31 * result + Long.hashCode(highWatermark);
+ result = 31 * result + Long.hashCode(logStartOffset);
+ result = 31 * result + Long.hashCode(logEndOffset);
+ result = 31 * result + Long.hashCode(lastStableOffset);
return result;
}
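For reference, `Long.hashCode(long)` (Java 8+) is specified as exactly the XOR-shift the handwritten lines computed, so the rewrite is behavior-preserving; a quick sketch:
```java
public class LongHashCodeDemo {
    public static void main(String[] args) {
        long v = 123_456_789_012L;
        int manual = (int) (v ^ (v >>> 32)); // the handwritten pattern above
        System.out.println(manual == Long.hashCode(v)); // always true
    }
}
```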
|
codereview_new_java_data_12431
|
public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCo
CompletableFuture<Void> future = commit(offsets);
future.whenComplete((r, t) -> {
if (t != null) {
- callback.onComplete(offsets, new RuntimeException(t));
} else {
callback.onComplete(offsets, null);
}
Any reason why this is a RuntimeException rather than a KafkaException?
public void commitAsync(Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCo
CompletableFuture<Void> future = commit(offsets);
future.whenComplete((r, t) -> {
if (t != null) {
+ callback.onComplete(offsets, new KafkaException(t));
} else {
callback.onComplete(offsets, null);
}
|
codereview_new_java_data_12432
|
protected synchronized long timeToNextHeartbeat(long now) {
// we don't need to send heartbeats
if (state.hasNotJoinedGroup())
return Long.MAX_VALUE;
- if (heartbeatThread != null) {
- if (heartbeatThread.hasFailed()) {
- // if an exception occurs in the heartbeat thread, raise it.
- throw heartbeatThread.failureCause();
- }
}
return heartbeat.timeToNextHeartbeat(now);
}
We could make this one line, yeah?
```
if (heartbeatThread != null && heartbeatThread.hasFailed()) {
...
}
```
protected synchronized long timeToNextHeartbeat(long now) {
// we don't need to send heartbeats
if (state.hasNotJoinedGroup())
return Long.MAX_VALUE;
+ if (heartbeatThread != null && heartbeatThread.hasFailed()) {
+ // if an exception occurs in the heartbeat thread, raise it.
+ throw heartbeatThread.failureCause();
}
return heartbeat.timeToNextHeartbeat(now);
}
|
codereview_new_java_data_12433
|
public RollParams(long maxSegmentMs,
this.now = now;
}
- public static RollParams create(LogConfig config, LogAppendInfo appendInfo, int messagesSize, long now) {
- return new RollParams(config.maxSegmentMs(),
- config.segmentSize,
- appendInfo.maxTimestamp(),
- appendInfo.lastOffset(),
- messagesSize,
- now);
- }
-
@Override
public String toString() {
return "RollParams(" +
Would it be better for this to be a separate constructor?
public RollParams(long maxSegmentMs,
this.now = now;
}
@Override
public String toString() {
return "RollParams(" +
|
codereview_new_java_data_12435
|
public void shouldRecordPollIdleRatio() {
raftMetrics.updatePollStart(time.milliseconds());
time.sleep(5);
- // Measurement arrives before poll end
assertEquals(0.6, getMetric(metrics, "poll-idle-ratio-avg").metricValue());
// More idle time for 5ms
time.sleep(5);
raftMetrics.updatePollEnd(time.milliseconds());
// The measurement includes the interval beginning at the last recording.
- // This counts 10ms of busy time and 10ms of idle time.
assertEquals(0.5, getMetric(metrics, "poll-idle-ratio-avg").metricValue());
}
How about documenting that this measurement covers 40ms of busy time and 60ms of idle time?
public void shouldRecordPollIdleRatio() {
raftMetrics.updatePollStart(time.milliseconds());
time.sleep(5);
+ // Measurement arrives before poll end, so we have 40ms busy time and 60ms idle.
+ // The subsequent interval time is not counted until the next measurement.
assertEquals(0.6, getMetric(metrics, "poll-idle-ratio-avg").metricValue());
// More idle time for 5ms
time.sleep(5);
raftMetrics.updatePollEnd(time.milliseconds());
// The measurement includes the interval beginning at the last recording.
+ // This counts 10ms of busy time and 5ms + 5ms = 10ms of idle time.
assertEquals(0.5, getMetric(metrics, "poll-idle-ratio-avg").metricValue());
}
|
codereview_new_java_data_12436
|
import java.util.Optional;
public class ReplicaAlterLogDirsTierStateMachine implements TierStateMachine {
public PartitionFetchState start(TopicPartition topicPartition,
PartitionFetchState currentFetchState,
FetchRequest.PartitionData fetchPartitionData) throws Exception {
// JBOD is not supported with tiered storage.
- throw new UnsupportedOperationException("Building remote log aux state not supported in ReplicaAlterLogDirsThread.");
}
public Optional<PartitionFetchState> maybeAdvanceState(TopicPartition topicPartition,
nit: Building remote log aux state is not supported in ReplicaAlterLogDirsThread.
import java.util.Optional;
+/**
+ * The replica alter log dirs tier state machine is unsupported but is provided to the ReplicaAlterLogDirsThread.
+ */
public class ReplicaAlterLogDirsTierStateMachine implements TierStateMachine {
public PartitionFetchState start(TopicPartition topicPartition,
PartitionFetchState currentFetchState,
FetchRequest.PartitionData fetchPartitionData) throws Exception {
// JBOD is not supported with tiered storage.
+ throw new UnsupportedOperationException("Building remote log aux state is not supported in ReplicaAlterLogDirsThread.");
}
public Optional<PartitionFetchState> maybeAdvanceState(TopicPartition topicPartition,
|
codereview_new_java_data_12438
|
public R apply(R record) {
@Override
public void close() {
- Utils.closeQuietly(delegate, "predicated transformation");
Utils.closeQuietly(predicate, "predicate");
}
```suggestion
Utils.closeQuietly(delegate, "transformation");
```
nit: Since this may or may not be a predicated transformation now.
public R apply(R record) {
@Override
public void close() {
+ Utils.closeQuietly(delegate, "transformation");
Utils.closeQuietly(predicate, "predicate");
}
|
codereview_new_java_data_12439
|
import java.util.Set;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
This unused import is causing a checkstyle failure.
import java.util.Set;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThrows;
|
codereview_new_java_data_12440
|
import org.apache.kafka.connect.transforms.predicates.Predicate;
/**
- * Wrapper for a {@link Transformation} and corresponding optional {@link Predicate }
* which applies the transformation when the {@link Predicate} is true (or false, according to {@code negate}).
* If no {@link Predicate} is provided, the transformation will be unconditionally applied.
* @param <R> The type of record (must be an implementation of {@link ConnectRecord})
Nit: whitespace
```suggestion
* Wrapper for a {@link Transformation} and corresponding optional {@link Predicate}
```
import org.apache.kafka.connect.transforms.predicates.Predicate;
/**
+ * Wrapper for a {@link Transformation} and corresponding optional {@link Predicate}
* which applies the transformation when the {@link Predicate} is true (or false, according to {@code negate}).
* If no {@link Predicate} is provided, the transformation will be unconditionally applied.
* @param <R> The type of record (must be an implementation of {@link ConnectRecord})
|
codereview_new_java_data_12441
|
protected Consumer<K, V> createConsumer() {
}
/**
- * Test whether a topic partition should be read by this log.
- * <p>Overridden by subclasses when only a subset of the assigned partitions should be read into memory.
- * By default, this will read all partitions.
* @param topicPartition A topic partition which could be read by this log.
* @return true if the partition should be read by this log, false if its contents should be ignored.
*/
Some nits:
```suggestion
* Signals whether a topic partition should be read by this log. Invoked on {@link #start() startup} once
* for every partition found in the log's backing topic.
* <p>This method can be overridden by subclasses when only a subset of the assigned partitions
* should be read into memory. By default, all partitions are read.
```
protected Consumer<K, V> createConsumer() {
}
/**
+ * Signals whether a topic partition should be read by this log. Invoked on {@link #start() startup} once
+ * for every partition found in the log's backing topic.
+ * <p>This method can be overridden by subclasses when only a subset of the assigned partitions
+ * should be read into memory. By default, all partitions are read.
* @param topicPartition A topic partition which could be read by this log.
* @return true if the partition should be read by this log, false if its contents should be ignored.
*/
|
codereview_new_java_data_12447
|
public NavigableMap<Integer, Object> read(ByteBuffer buffer) {
}
prevTag = tag;
int size = ByteUtils.readUnsignedVarint(buffer);
- if (size < 0 && isNullable())
- return null;
- else if (size < 0)
throw new SchemaException("field size " + size + " cannot be negative");
if (size > buffer.remaining())
throw new SchemaException("Error reading field of size " + size + ", only " + buffer.remaining() + " bytes available");
`TaggedFields` are never nullable. We can combine this with the if clause below.
public NavigableMap<Integer, Object> read(ByteBuffer buffer) {
}
prevTag = tag;
int size = ByteUtils.readUnsignedVarint(buffer);
+ if (size < 0)
throw new SchemaException("field size " + size + " cannot be negative");
if (size > buffer.remaining())
throw new SchemaException("Error reading field of size " + size + ", only " + buffer.remaining() + " bytes available");
|
codereview_new_java_data_12449
|
private static void createTopic(Optional<String> propertiesFile, String brokers,
Properties adminProps = loadPropsWithBootstrapServers(propertiesFile, brokers);
Admin adminClient = Admin.create(adminProps);
NewTopic newTopic = new NewTopic(topic, defaultNumPartitions, defaultReplicationFactor);
- adminClient.createTopics(Collections.singletonList(newTopic));
try {
adminClient.createTopics(Collections.singleton(newTopic)).all().get();
} catch (ExecutionException | InterruptedException e) {
You need to remove this line to avoid `TopicExistsException` when the test topic does not exist. Once you do that, I'll approve.
private static void createTopic(Optional<String> propertiesFile, String brokers,
Properties adminProps = loadPropsWithBootstrapServers(propertiesFile, brokers);
Admin adminClient = Admin.create(adminProps);
NewTopic newTopic = new NewTopic(topic, defaultNumPartitions, defaultReplicationFactor);
try {
adminClient.createTopics(Collections.singleton(newTopic)).all().get();
} catch (ExecutionException | InterruptedException e) {
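A sketch of the resulting pattern: await the async result so failures surface, and tolerate the one failure expected on reruns. The bootstrap address, topic name, and sizing below are placeholder assumptions.
```java
import java.util.Collections;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;

public class CreateTopicSketch {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (Admin admin = Admin.create(props)) {
            NewTopic topic = new NewTopic("demo-topic", 1, (short) 1);
            try {
                // .all().get() forces the async result, so failures surface here.
                admin.createTopics(Collections.singleton(topic)).all().get();
            } catch (ExecutionException e) {
                if (!(e.getCause() instanceof TopicExistsException))
                    throw new RuntimeException(e.getCause());
                // topic already exists: acceptable for this sketch
            }
        }
    }
}
```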
|
codereview_new_java_data_12450
|
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.requests.FetchRequest;
-import org.apache.kafka.common.utils.FetchRequestUtils;
public enum FetchIsolation {
LOG_END,
HIGH_WATERMARK,
TXN_COMMITTED;
- public static FetchIsolation apply(FetchRequest request) {
- return apply(request.replicaId(), request.isolationLevel());
}
- public static FetchIsolation apply(int replicaId, IsolationLevel isolationLevel) {
- if (!FetchRequestUtils.isConsumer(replicaId)) {
return LOG_END;
} else if (isolationLevel == IsolationLevel.READ_COMMITTED) {
return TXN_COMMITTED;
I'd call this method and the other methods `of`. That's more common for Java.
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.requests.FetchRequest;
public enum FetchIsolation {
LOG_END,
HIGH_WATERMARK,
TXN_COMMITTED;
+ public static FetchIsolation of(FetchRequest request) {
+ return of(request.replicaId(), request.isolationLevel());
}
+ public static FetchIsolation of(int replicaId, IsolationLevel isolationLevel) {
+ if (!FetchRequest.isConsumer(replicaId)) {
return LOG_END;
} else if (isolationLevel == IsolationLevel.READ_COMMITTED) {
return TXN_COMMITTED;
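To make the mapping concrete, here is a self-contained restatement of the decision logic with sample inputs. Treating a `replicaId` of -1 as a consumer fetch is an assumption that mirrors Kafka's convention; the demo does not depend on the real `FetchRequest` class.
```java
import org.apache.kafka.common.IsolationLevel;

public class FetchIsolationSketch {
    static final int CONSUMER_REPLICA_ID = -1; // assumed consumer replica id

    static String of(int replicaId, IsolationLevel isolationLevel) {
        if (replicaId != CONSUMER_REPLICA_ID)
            return "LOG_END";        // follower fetches read to the log end
        if (isolationLevel == IsolationLevel.READ_COMMITTED)
            return "TXN_COMMITTED";  // transactional consumers stop earlier
        return "HIGH_WATERMARK";     // plain consumers stop at the high watermark
    }

    public static void main(String[] args) {
        System.out.println(of(2, IsolationLevel.READ_UNCOMMITTED));  // LOG_END
        System.out.println(of(-1, IsolationLevel.READ_COMMITTED));   // TXN_COMMITTED
        System.out.println(of(-1, IsolationLevel.READ_UNCOMMITTED)); // HIGH_WATERMARK
    }
}
```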
|
codereview_new_java_data_12452
|
public static long offsetFromFileName(String fileName) {
return Long.parseLong(fileName.substring(0, fileName.indexOf('.')));
}
- private volatile File file;
public final long offset;
public SnapshotFile(File file) {
this(file, offsetFromFileName(file.getName()));
Nit: let's place `public` fields first.
public static long offsetFromFileName(String fileName) {
return Long.parseLong(fileName.substring(0, fileName.indexOf('.')));
}
public final long offset;
+ private volatile File file;
public SnapshotFile(File file) {
this(file, offsetFromFileName(file.getName()));
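For context on `offsetFromFileName`, a minimal sketch of what it computes on a typical snapshot file name (the file name below is hypothetical):
```java
public class OffsetFromFileNameSketch {
    // Same parsing as the helper above: everything before the first dot is the offset.
    static long offsetFromFileName(String fileName) {
        return Long.parseLong(fileName.substring(0, fileName.indexOf('.')));
    }

    public static void main(String[] args) {
        System.out.println(offsetFromFileName("00000000000000368769.snapshot")); // 368769
    }
}
```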
|
codereview_new_java_data_12453
|
public static String toLogDateTimeFormat(long timestamp) {
/**
* Replace the given string suffix with the new suffix. If the string doesn't end with the given suffix throw an exception.
*/
- public static String replaceSuffix(String name, String oldSuffix, String newSuffix) {
- if (!name.endsWith(oldSuffix))
- throw new IllegalArgumentException("Expected string to end with " + oldSuffix + " but string is " + name);
- return name.substring(0, name.length() - oldSuffix.length()) + newSuffix;
}
}
Nit: `name` should probably be `s` or something that makes it clear that it can be any string, not just a name.
Also, can we replace `CoreUtils.replaceSuffix` with this method and ensure we have unit tests for this new method?
public static String toLogDateTimeFormat(long timestamp) {
/**
* Replace the given string suffix with the new suffix. If the string doesn't end with the given suffix throw an exception.
*/
+ public static String replaceSuffix(String str, String oldSuffix, String newSuffix) {
+ if (!str.endsWith(oldSuffix))
+ throw new IllegalArgumentException("Expected string to end with " + oldSuffix + " but string is " + str);
+ return str.substring(0, str.length() - oldSuffix.length()) + newSuffix;
}
}
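Since the review asks for unit tests of the new method, here is a minimal JUnit 5 sketch. It carries a local copy of `replaceSuffix` so it is self-contained; the test class name is invented.
```java
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;

import org.junit.jupiter.api.Test;

public class ReplaceSuffixTest {
    // Local copy of the method under review, so the test compiles on its own.
    static String replaceSuffix(String str, String oldSuffix, String newSuffix) {
        if (!str.endsWith(oldSuffix))
            throw new IllegalArgumentException("Expected string to end with " + oldSuffix + " but string is " + str);
        return str.substring(0, str.length() - oldSuffix.length()) + newSuffix;
    }

    @Test
    public void replacesMatchingSuffix() {
        assertEquals("00000.log", replaceSuffix("00000.cleaned", ".cleaned", ".log"));
    }

    @Test
    public void throwsWhenSuffixDoesNotMatch() {
        assertThrows(IllegalArgumentException.class,
                () -> replaceSuffix("00000.log", ".cleaned", ".log"));
    }
}
```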
|
codereview_new_java_data_12454
|
*/
public class LogOffsetMetadata {
- //TODO remove once UnifiedLog has been moved to the storage module
private static final long UNIFIED_LOG_UNKNOWN_OFFSET = -1L;
public static final LogOffsetMetadata UNKNOWN_OFFSET_METADATA = new LogOffsetMetadata(-1L, 0L, 0);
Can we please use Jira tickets instead of TODOs? The latter tend to clutter the codebase and are often forgotten.
*/
public class LogOffsetMetadata {
+ //TODO KAFKA-14484 remove once UnifiedLog has been moved to the storage module
private static final long UNIFIED_LOG_UNKNOWN_OFFSET = -1L;
public static final LogOffsetMetadata UNKNOWN_OFFSET_METADATA = new LogOffsetMetadata(-1L, 0L, 0);
|
codereview_new_java_data_12455
|
public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned(
consumerClient.poll(time.timer(0));
assertFalse(fetcher.hasCompletedFetches());
fetchedRecords();
}
@Test
Should we add the following assertion to be consistent with other tests?
```
selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
assertEquals(-1, selected.id());
```
public void testFetchDisconnectedShouldNotClearPreferredReadReplicaIfUnassigned(
consumerClient.poll(time.timer(0));
assertFalse(fetcher.hasCompletedFetches());
fetchedRecords();
+ selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds());
+ assertEquals(-1, selected.id());
}
@Test
|
codereview_new_java_data_12459
|
public Builder(ConsumerGroupHeartbeatRequestData data) {
this.data = data;
}
-
@Override
public ConsumerGroupHeartbeatRequest build(short version) {
return new ConsumerGroupHeartbeatRequest(data, version);
nit: double space here
public Builder(ConsumerGroupHeartbeatRequestData data) {
this.data = data;
}
@Override
public ConsumerGroupHeartbeatRequest build(short version) {
return new ConsumerGroupHeartbeatRequest(data, version);
|
codereview_new_java_data_12460
|
public enum Errors {
INELIGIBLE_REPLICA(107, "The new ISR contains at least one ineligible replica.", IneligibleReplicaException::new),
NEW_LEADER_ELECTED(108, "The AlterPartition request successfully updated the partition state but the leader has changed.", NewLeaderElectedException::new),
OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new),
- FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoins.", FencedMemberEpochException::new),
UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. That member must leave first.", UnreleasedInstanceIdException::new),
UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new);
nit: rejoin (singular)?
public enum Errors {
INELIGIBLE_REPLICA(107, "The new ISR contains at least one ineligible replica.", IneligibleReplicaException::new),
NEW_LEADER_ELECTED(108, "The AlterPartition request successfully updated the partition state but the leader has changed.", NewLeaderElectedException::new),
OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new),
+ FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoin.", FencedMemberEpochException::new),
UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. That member must leave first.", UnreleasedInstanceIdException::new),
UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new);
|
codereview_new_java_data_12464
|
* limitations under the License.
*/
/**
- * Provides API for application-defined metadata attached to Connect records.
*/
package org.apache.kafka.connect.header;
\ No newline at end of file
Nit:
```suggestion
* Provides an API for application-defined metadata attached to Connect records.
```
* limitations under the License.
*/
/**
+ * Provides an API for application-defined metadata attached to Connect records.
*/
package org.apache.kafka.connect.header;
\ No newline at end of file
|
codereview_new_java_data_12465
|
* limitations under the License.
*/
/**
- * Provides pluggable interface for altering the behavior of the Connect REST API.
*/
package org.apache.kafka.connect.rest;
\ No newline at end of file
Nit:
```suggestion
* Provides a pluggable interface for altering the behavior of the Connect REST API.
```
* limitations under the License.
*/
/**
+ * Provides a pluggable interface for altering the behavior of the Connect REST API.
*/
package org.apache.kafka.connect.rest;
\ No newline at end of file
|
codereview_new_java_data_12466
|
* limitations under the License.
*/
/**
- * Provides API for implementing connectors which read data from external applications into Kafka.
*/
package org.apache.kafka.connect.source;
\ No newline at end of file
Same nits:
```suggestion
* Provides an API for implementing connectors which read data from external applications into Kafka,
* also known as <i>source connectors</i>.
```
* limitations under the License.
*/
/**
+ * Provides an API for implementing source connectors which read data from external applications into Kafka.
*/
package org.apache.kafka.connect.source;
\ No newline at end of file
|
codereview_new_java_data_12467
|
* limitations under the License.
*/
/**
- * Provides high-level abstractions of streaming data and computation over that streaming data.
*/
package org.apache.kafka.streams.kstream;
\ No newline at end of file
What about this:
```
Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input {@link KStream streams} and {@link KTable tables}. Use {@link org.apache.kafka.streams.StreamsBuilder} as entry for your program.
```
* limitations under the License.
*/
/**
+ * Provides a high-level programming model (DSL) to express a (stateful) data flow computation over input {@link org.apache.kafka.streams.kstream.KStream streams} and {@link org.apache.kafka.streams.kstream.KTable tables}.
+ * Use {@link org.apache.kafka.streams.StreamsBuilder} as the entry point for your program.
*/
package org.apache.kafka.streams.kstream;
\ No newline at end of file
|
codereview_new_java_data_12468
|
* limitations under the License.
*/
/**
- * Provides low-level abstractions of streaming data and computation over that streaming data.
*/
package org.apache.kafka.streams.processor.api;
\ No newline at end of file
What about this:
```
Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics. Use {@link org.apache.kafka.streams.Topology} as entry for your program.
```
* limitations under the License.
*/
/**
+ * Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+ * Use {@link org.apache.kafka.streams.Topology} as the entry point for your program.
*/
package org.apache.kafka.streams.processor.api;
\ No newline at end of file
|
codereview_new_java_data_12469
|
* limitations under the License.
*/
/**
- * Kafka Client for performing administrative operations on a Kafka cluster
*/
package org.apache.kafka.clients.admin;
\ No newline at end of file
```suggestion
* Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
```
* limitations under the License.
*/
/**
+ * Provides a Kafka client for performing administrative operations (such as creating topics and configuring brokers) on a Kafka cluster.
*/
package org.apache.kafka.clients.admin;
\ No newline at end of file
|
codereview_new_java_data_12470
|
* limitations under the License.
*/
/**
- * Kafka Client for consuming events from a Kafka Cluster
*/
package org.apache.kafka.clients.consumer;
\ No newline at end of file
```suggestion
* Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
```
* limitations under the License.
*/
/**
+ * Provides a Kafka client for consuming records from topics and/or partitions in a Kafka cluster.
*/
package org.apache.kafka.clients.consumer;
\ No newline at end of file
|
codereview_new_java_data_12471
|
* limitations under the License.
*/
/**
- * Provides mechanisms for emitting monitoring metrics.
*/
package org.apache.kafka.common.metrics;
\ No newline at end of file
```suggestion
* Provides the API used by Kafka clients to emit metrics which are then exposed using the {@link MetricsReporter} interface.
```
* limitations under the License.
*/
/**
+ * Provides the API used by Kafka clients to emit metrics which are then exposed using the {@link org.apache.kafka.common.metrics.MetricsReporter} interface.
*/
package org.apache.kafka.common.metrics;
\ No newline at end of file
|
codereview_new_java_data_12472
|
*/
/**
* This package is deprecated.
*/
package org.apache.kafka.common.security.oauthbearer.secured;
\ No newline at end of file
You could add the `@Deprecated` annotation and the `@deprecated` Javadoc tag.
*/
/**
* This package is deprecated.
+ * @deprecated See {@link org.apache.kafka.common.security.oauthbearer}
*/
package org.apache.kafka.common.security.oauthbearer.secured;
\ No newline at end of file
|
codereview_new_java_data_12473
|
private TopicAssignment place(
PlacementSpec placementSpec = new PlacementSpec(startPartition,
numPartitions,
replicationFactor);
- ClusterDescriber cluster = () -> brokers.iterator();
- return placer.place(placementSpec, cluster);
}
/**
How about `brokers::iterator`?
private TopicAssignment place(
PlacementSpec placementSpec = new PlacementSpec(startPartition,
numPartitions,
replicationFactor);
+ return placer.place(placementSpec, brokers::iterator);
}
/**
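For readers unfamiliar with the `brokers::iterator` suggestion: a method reference can implement any single-method interface whose signature matches. A self-contained sketch with a stand-in `Describer` interface (the real target here is `ClusterDescriber`, which is not reproduced):
```java
import java.util.Iterator;
import java.util.List;

public class MethodRefSketch {
    // Stand-in for ClusterDescriber: one method returning an Iterator.
    interface Describer<T> {
        Iterator<T> usableBrokers();
    }

    public static void main(String[] args) {
        List<String> brokers = List.of("b0", "b1", "b2");
        // The lambda and the method reference below are equivalent;
        // the reference is the idiom the review asks for.
        Describer<String> viaLambda = () -> brokers.iterator();
        Describer<String> viaRef = brokers::iterator;
        viaLambda.usableBrokers().forEachRemaining(b -> { /* no-op */ });
        viaRef.usableBrokers().forEachRemaining(System.out::println);
    }
}
```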
|
codereview_new_java_data_12475
|
KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
* For example, for {@link EmitStrategy#onWindowClose} strategy, the aggregated result for a
* window will only be emitted when the window closes. For {@link EmitStrategy#onWindowUpdate()}
* strategy, the aggregated result for a window will be emitted whenever there is an update to
- * the window.
*
* @param emitStrategy {@link EmitStrategy} to configure when the aggregated result for a window will be emitted.
* @return a {@code SessionWindowedKStream} with {@link EmitStrategy} configured.
> emitted whenever there is an update
Should we mention caching in addition?
KTable<Windowed<K>, V> reduce(final Reducer<V> reducer,
* For example, for {@link EmitStrategy#onWindowClose} strategy, the aggregated result for a
* window will only be emitted when the window closes. For {@link EmitStrategy#onWindowUpdate()}
* strategy, the aggregated result for a window will be emitted whenever there is an update to
+ * the window. Note that whether the result will be available downstream also depends on
+ * the cache policy.
*
* @param emitStrategy {@link EmitStrategy} to configure when the aggregated result for a window will be emitted.
* @return a {@code SessionWindowedKStream} with {@link EmitStrategy} configured.
|
codereview_new_java_data_12476
|
public FindCoordinatorResponse(FindCoordinatorResponseData data) {
this.data = data;
}
- public Optional<Coordinator> getCoordinatorByKey(String key) {
Objects.requireNonNull(key);
if (this.data.coordinators().isEmpty()) {
// version <= 3
Can we write a test case which covers all versions?
public FindCoordinatorResponse(FindCoordinatorResponseData data) {
this.data = data;
}
+ public Optional<Coordinator> coordinatorByKey(String key) {
Objects.requireNonNull(key);
if (this.data.coordinators().isEmpty()) {
// version <= 3
|
codereview_new_java_data_12486
|
public class ConnectorsResource implements ConnectResource {
private final boolean isTopicTrackingDisabled;
private final boolean isTopicTrackingResetDisabled;
- public ConnectorsResource(Herder herder, WorkerConfig config) {
- this(herder, config, new RestClient(config));
- }
public ConnectorsResource(Herder herder, WorkerConfig config, RestClient restClient) {
this.herder = herder;
this.restClient = restClient;
It doesn't look like this constructor is used anywhere; can we remove it?
public class ConnectorsResource implements ConnectResource {
private final boolean isTopicTrackingDisabled;
private final boolean isTopicTrackingResetDisabled;
public ConnectorsResource(Herder herder, WorkerConfig config, RestClient restClient) {
this.herder = herder;
this.restClient = restClient;
|
codereview_new_java_data_12487
|
* */
default Optional<Set<Integer>> partitions(String topic, K key, V value, int numPartitions) {
final Integer partition = partition(topic, key, value, numPartitions);
- return partition == null ? Optional.empty() : Optional.of(Collections.singleton(partition(topic, key, value, numPartitions)));
}
}
```suggestion
return partition == null ? Optional.empty() : Optional.of(Collections.singleton(partition));
```
* */
default Optional<Set<Integer>> partitions(String topic, K key, V value, int numPartitions) {
final Integer partition = partition(topic, key, value, numPartitions);
+ return partition == null ? Optional.empty() : Optional.of(Collections.singleton(partition));
}
}
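The corrected line is worth a small demonstration, since the bug was recomputing the partition inside the `Optional`. A sketch with the partition passed in directly (the helper signature is simplified for illustration):
```java
import java.util.Collections;
import java.util.Optional;
import java.util.Set;

public class PartitionsDefaultSketch {
    // Mirrors the corrected default method: wrap a possibly-null partition
    // into an Optional singleton set without recomputing it.
    static Optional<Set<Integer>> partitions(Integer partition) {
        return partition == null ? Optional.empty() : Optional.of(Collections.singleton(partition));
    }

    public static void main(String[] args) {
        System.out.println(partitions(3));    // Optional[[3]]
        System.out.println(partitions(null)); // Optional.empty
    }
}
```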
|
codereview_new_java_data_12488
|
public Future<Void> set(final Map<ByteBuffer, ByteBuffer> values, final Callback
return producerCallback;
}
- protected final Callback<ConsumerRecord<byte[], byte[]>> consumedCallback = new Callback<ConsumerRecord<byte[], byte[]>>() {
- @Override
- public void onCompletion(Throwable error, ConsumerRecord<byte[], byte[]> record) {
- ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
- ByteBuffer value = record.value() != null ? ByteBuffer.wrap(record.value()) : null;
- data.put(key, value);
}
};
It's a little strange to keep the `value` initializer the way it was before. IMO this would be more readable:
```java
if (record.value() == null)
data.remove(key);
else
data.put(key, ByteBuffer.wrap(record.value()));
```
public Future<Void> set(final Map<ByteBuffer, ByteBuffer> values, final Callback
return producerCallback;
}
+ protected final Callback<ConsumerRecord<byte[], byte[]>> consumedCallback = (error, record) -> {
+ if (error != null) {
+ log.error("Failed to read from the offsets topic", error);
+ return;
+ }
+
+ ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
+
+ if (record.value() == null) {
+ data.remove(key);
+ } else {
+ data.put(key, ByteBuffer.wrap(record.value()));
}
};
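The null-value branch implements compacted-topic tombstone semantics: a record with a null value deletes the key from the materialized map. A self-contained sketch of just that behavior, with a plain `HashMap` standing in for the log's state:
```java
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

public class TombstoneSketch {
    static final Map<ByteBuffer, ByteBuffer> data = new HashMap<>();

    // A null value is a tombstone: remove the key instead of storing null.
    static void apply(byte[] key, byte[] value) {
        ByteBuffer k = key != null ? ByteBuffer.wrap(key) : null;
        if (value == null) {
            data.remove(k);
        } else {
            data.put(k, ByteBuffer.wrap(value));
        }
    }

    public static void main(String[] args) {
        apply("a".getBytes(StandardCharsets.UTF_8), "1".getBytes(StandardCharsets.UTF_8));
        apply("a".getBytes(StandardCharsets.UTF_8), null); // tombstone
        System.out.println(data.isEmpty()); // true
    }
}
```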
|
codereview_new_java_data_12493
|
public void shouldRethrowTaskCorruptedExceptionFromInitialization() {
.withInputPartitions(taskId01Partitions).build();
final StreamTask statefulTask2 = statefulTask(taskId02, taskId02ChangelogPartitions)
.inState(State.CREATED)
- .withInputPartitions(taskId01Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.EXACTLY_ONCE_V2, tasks, true);
when(tasks.drainPendingTaskToInit()).thenReturn(mkSet(statefulTask0, statefulTask1, statefulTask2));
nit:
```suggestion
.withInputPartitions(taskId02Partitions).build();
```
public void shouldRethrowTaskCorruptedExceptionFromInitialization() {
.withInputPartitions(taskId01Partitions).build();
final StreamTask statefulTask2 = statefulTask(taskId02, taskId02ChangelogPartitions)
.inState(State.CREATED)
+ .withInputPartitions(taskId02Partitions).build();
final TasksRegistry tasks = mock(TasksRegistry.class);
final TaskManager taskManager = setUpTaskManager(ProcessingMode.EXACTLY_ONCE_V2, tasks, true);
when(tasks.drainPendingTaskToInit()).thenReturn(mkSet(statefulTask0, statefulTask1, statefulTask2));
|
codereview_new_java_data_12494
|
public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception {
}
@Test
- public void testIsCycleWithNullUpstreamTopic() throws Exception {
class BadReplicationPolicy extends DefaultReplicationPolicy {
@Override
public String upstreamTopic(String topic) {
I think we can drop `throws Exception`
public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception {
}
@Test
+ public void testIsCycleWithNullUpstreamTopic() {
class BadReplicationPolicy extends DefaultReplicationPolicy {
@Override
public String upstreamTopic(String topic) {
|
codereview_new_java_data_12495
|
public void shouldNotTransitToStandbyAgainAfterStandbyTaskFailed() throws Except
mkEntry(task1.id(), task1),
mkEntry(task2.id(), task2)
);
- final TaskCorruptedException taskCorruptedException =
- new TaskCorruptedException(mkSet(task1.id()));
- final ExceptionAndTasks expectedExceptionAndTasks =
- new ExceptionAndTasks(mkSet(task1), taskCorruptedException);
when(changelogReader.allChangelogsCompleted()).thenReturn(false);
doThrow(taskCorruptedException).doNothing().when(changelogReader).restore(updatingTasks);
nit:
```suggestion
final TaskCorruptedException taskCorruptedException = new TaskCorruptedException(mkSet(task1.id()));
```
public void shouldNotTransitToStandbyAgainAfterStandbyTaskFailed() throws Except
mkEntry(task1.id(), task1),
mkEntry(task2.id(), task2)
);
+ final TaskCorruptedException taskCorruptedException = new TaskCorruptedException(mkSet(task1.id()));
+ final ExceptionAndTasks expectedExceptionAndTasks = new ExceptionAndTasks(mkSet(task1), taskCorruptedException);
when(changelogReader.allChangelogsCompleted()).thenReturn(false);
doThrow(taskCorruptedException).doNothing().when(changelogReader).restore(updatingTasks);
|
codereview_new_java_data_12497
|
* This is the abstract definition of the events created by the KafkaConsumer API
*/
abstract public class ApplicationEvent {
- public final EventType type;
-
- public ApplicationEvent(EventType type) {
- this.type = type;
- }
-
- public enum EventType {
- COMMIT,
- NOOP,
- }
}
Ditto, I think we should keep the testing-related element out of the production code eventually. If we are going to remove it later then that's fine.
* This is the abstract definition of the events created by the KafkaConsumer API
*/
abstract public class ApplicationEvent {
+ /**
+ * Process the application event. Return true upon successful execution,
+ * false otherwise.
+ * @return true if the event was successfully executed; false otherwise.
+ */
+ public abstract boolean process();
}
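A compact sketch of the abstraction after this change, plus one subclass; `NoopEvent` is invented for illustration and is not part of the record.
```java
// Minimal restatement of the abstract event plus a hypothetical subclass.
abstract class ApplicationEvent {
    /** @return true if the event was successfully executed; false otherwise. */
    public abstract boolean process();
}

class NoopEvent extends ApplicationEvent {
    @Override
    public boolean process() {
        return true; // nothing to do; report success
    }

    public static void main(String[] args) {
        System.out.println(new NoopEvent().process()); // true
    }
}
```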
|
codereview_new_java_data_12498
|
private void addTasksToStateUpdater() {
}
private void pauseTasksInStateUpdater() {
- for (final Task task : stateUpdater.getUpdatingTasks()) {
- if (topologyMetadata.isPaused(task.id().topologyName())) {
- stateUpdater.pause(task.id());
- }
- }
}
private void resumeTasksInStateUpdater() {
- for (final Task task : stateUpdater.getPausedTasks()) {
- if (!topologyMetadata.isPaused(task.id().topologyName())) {
- stateUpdater.resume(task.id());
- }
- }
}
public void handleExceptionsFromStateUpdater() {
I'm a bit concerned about perf here since this is called on each iteration, and to be safer we'd need to wrap each task as read-only as we did for `getTasks`, generating a lot of young-gen garbage for GCs; but I have not come up with a better solution than the current approach.
private void addTasksToStateUpdater() {
}
private void pauseTasksInStateUpdater() {
+ stateUpdater.pause(topologyMetadata);
}
private void resumeTasksInStateUpdater() {
+ stateUpdater.resume(topologyMetadata);
}
public void handleExceptionsFromStateUpdater() {
|
codereview_new_java_data_12499
|
private void rewriteSingleStoreSelfJoin(
parent.removeChild(right);
joinNode.setSelfJoin();
} else {
- throw new StreamsException(String.format("Expected the left node %s to have smaller build priority than the right node %s.", left, right));
}
}
for (final GraphNode child: currentNode.children()) {
nit: I'd suggest we throw an unchecked illegal-state-exception directly since this should never happen, while streams exception is a checked exception.
private void rewriteSingleStoreSelfJoin(
parent.removeChild(right);
joinNode.setSelfJoin();
} else {
+ throw new IllegalStateException(String.format("Expected the left node %s to have smaller build priority than the right node %s.", left, right));
}
}
for (final GraphNode child: currentNode.children()) {
|
codereview_new_java_data_12500
|
public interface ChangelogRegister {
*/
void register(final TopicPartition partition, final ProcessorStateManager stateManager);
- void register(final Set<TopicPartition> partition, final ProcessorStateManager stateManager);
/**
* Unregisters and removes the passed in partitions from the set of changelogs
This is not strictly needed, but I thought it makes registering and unregistering a bit more symmetric.
public interface ChangelogRegister {
*/
void register(final TopicPartition partition, final ProcessorStateManager stateManager);
+ void register(final Set<TopicPartition> partitions, final ProcessorStateManager stateManager);
/**
* Unregisters and removes the passed in partitions from the set of changelogs
|
codereview_new_java_data_12501
|
public interface ChangelogRegister {
*/
void register(final TopicPartition partition, final ProcessorStateManager stateManager);
- void register(final Set<TopicPartition> partition, final ProcessorStateManager stateManager);
/**
* Unregisters and removes the passed in partitions from the set of changelogs
nit: partition -> partitions?
public interface ChangelogRegister {
*/
void register(final TopicPartition partition, final ProcessorStateManager stateManager);
+ void register(final Set<TopicPartition> partitions, final ProcessorStateManager stateManager);
/**
* Unregisters and removes the passed in partitions from the set of changelogs
|
codereview_new_java_data_12502
|
public boolean poll(RequestFuture<?> future, Timer timer) {
* @param disableWakeup true if we should not check for wakeups, false otherwise
*
* @return true if the future is done, false otherwise
- * @throws WakeupException if {@link #wakeup()} is called from another thread
* @throws InterruptException if the calling thread is interrupted
*/
public boolean poll(RequestFuture<?> future, Timer timer, boolean disableWakeup) {
We should mention `WakeupException` only throws when `disableWakeup` is false.
public boolean poll(RequestFuture<?> future, Timer timer) {
* @param disableWakeup true if we should not check for wakeups, false otherwise
*
* @return true if the future is done, false otherwise
+ * @throws WakeupException if {@link #wakeup()} is called from another thread and `disableWakeup` is false
* @throws InterruptException if the calling thread is interrupted
*/
public boolean poll(RequestFuture<?> future, Timer timer, boolean disableWakeup) {
|
codereview_new_java_data_12503
|
public void testMarkingPartitionPending() {
assertTrue(state.isFetchable(tp0));
state.markPendingRevocation(singleton(tp0));
assertFalse(state.isFetchable(tp0));
}
@Test
Perhaps we can also assert `isPaused` is false?
public void testMarkingPartitionPending() {
assertTrue(state.isFetchable(tp0));
state.markPendingRevocation(singleton(tp0));
assertFalse(state.isFetchable(tp0));
+ assertFalse(state.isPaused(tp0));
}
@Test
|