Columns — id: string (29–30 chars); content: string (152–2.6k chars)
codereview_new_java_data_8249
final class ArmeriaServerHttpRequest extends AbstractServerHttpRequest { private static HttpHeaders springHeaders(RequestHeaders headers) { final HttpHeaders springHeaders = new HttpHeaders(); - toHttp1Headers(headers, (key, value) -> springHeaders.add(key.toString(), value)); return springHeaders; } How about adding a new functional interface that takes three arguments and injecting the `springHeaders` into the method so that the lambda does not capture the object outside of the local scope. ```suggestion toHttp1Headers(headers, springHeaders, (output, key, value) -> output.add(key.toString(), value)); ``` final class ArmeriaServerHttpRequest extends AbstractServerHttpRequest { private static HttpHeaders springHeaders(RequestHeaders headers) { final HttpHeaders springHeaders = new HttpHeaders(); + toHttp1Headers(headers, springHeaders, (output, key, value) -> output.add(key.toString(), value)); return springHeaders; }
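A minimal sketch (hypothetical generic names; Armeria's real `toHttp1Headers` operates on `RequestHeaders`) of the three-argument functional interface the review proposes, so the lambda captures nothing beyond its own parameters:

```java
import java.util.Map;

@FunctionalInterface
interface TriConsumer<A, B, C> {
    void accept(A a, B b, C c);
}

final class HeaderCopy {
    // The output sink is passed in explicitly, so the lambda used by callers is
    // stateless: it captures no enclosing object and could be a shared constant.
    static <K, V, O> void toHttp1Headers(Map<K, V> headers, O output,
                                         TriConsumer<O, K, V> consumer) {
        headers.forEach((key, value) -> consumer.accept(output, key, value));
    }
}
```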
codereview_new_java_data_8250
public HttpHeaders getHeaders() { } this.httpHeaders = new HttpHeaders(); toHttp1Headers(headers, this.httpHeaders, - (output, header, value) -> output.add(header.toString(), value)); return this.httpHeaders; } nit: ```suggestion (output, key, value) -> output.add(key.toString(), value)); ``` public HttpHeaders getHeaders() { } this.httpHeaders = new HttpHeaders(); toHttp1Headers(headers, this.httpHeaders, + (output, key, value) -> output.add(key.toString(), value)); return this.httpHeaders; }
codereview_new_java_data_8251
private Flags() {} // These static variables are defined at the end of this file deliberately // to ensure that all static variables beforehand are initialized. private static final boolean INITIALIZED; static { INITIALIZED = true; } Question: The behavior of this inline initialization (`boolean INITIALIZED = true`) is different from the `static` initialization block? private Flags() {} // These static variables are defined at the end of this file deliberately // to ensure that all static variables beforehand are initialized. private static final boolean INITIALIZED; + static { INITIALIZED = true; }
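For context on the question above, a minimal sketch (hypothetical classes) of the actual difference: `static final boolean INITIALIZED = true;` declares a compile-time constant that the compiler inlines at use sites, so reading it does not force class initialization, while a blank final assigned in a `static` block is resolved at run time and therefore guarantees that all static fields declared before it have been initialized first.

```java
class InlineConstant {
    // Compile-time constant: javac inlines the value at use sites, so reading
    // InlineConstant.INITIALIZED does not trigger this class's initializer.
    static final boolean INITIALIZED = true;
}

class BlockInitialized {
    static final boolean INITIALIZED;

    static {
        // Blank final assigned at run time: reading BlockInitialized.INITIALIZED
        // runs the class initializer, so every static field declared above this
        // one is guaranteed to be initialized first.
        INITIALIZED = true;
    }
}
```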
codereview_new_java_data_8252
package com.linecorp.armeria.common; import org.junit.jupiter.api.Test; -public class CyclicDependencyTest { @Test void testFlags() { - System.out.println(Flags.requestContextStorageProvider()); } } How about using `assertThatCode(...).doesNotThrowAnyException()`? package com.linecorp.armeria.common; +import static org.assertj.core.api.Assertions.assertThatCode; + import org.junit.jupiter.api.Test; +class CyclicDependencyTest { @Test void testFlags() { + assertThatCode(Flags::requestContextStorageProvider) + .doesNotThrowAnyException(); } }
codereview_new_java_data_8253
package com.linecorp.armeria.common; import org.junit.jupiter.api.Test; -public class CyclicDependencyTest { @Test void testFlags() { - System.out.println(Flags.requestContextStorageProvider()); } } How about prefixing `Flags` to tell what could have cyclic dependencies. ```suggestion class FlagsCyclicDependencyTest { ``` package com.linecorp.armeria.common; +import static org.assertj.core.api.Assertions.assertThatCode; + import org.junit.jupiter.api.Test; +class CyclicDependencyTest { @Test void testFlags() { + assertThatCode(Flags::requestContextStorageProvider) + .doesNotThrowAnyException(); } }
codereview_new_java_data_8254
default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> } /** - * Recovers an {@link HttpResponse} exception to the specified {@link Throwable} class. - * when any error occurs before a {@link ResponseHeaders} is written. - * Note that the failed {@link HttpResponse} cannot be recovered from an error if - * a {@link ResponseHeaders} was written already * * <p>Example:<pre>{@code * HttpResponse response = HttpResponse.ofFailure(new IllegalStateException("Oops...")); This isn't a complete sentence ```suggestion * Recovers a failed {@link HttpResponse} by switching to a returned fallback {@link HttpResponse} * when the thrown {@link Throwable} is the same type or a subtype of the * specified {@code causeClass}. ``` default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> } /** + * Recovers a failed {@link HttpResponse} by switching to a returned fallback {@link HttpResponse} + * when the thrown {@link Throwable} is the same type or a subtype of the + * specified {@code causeClass}. * * <p>Example:<pre>{@code * HttpResponse response = HttpResponse.ofFailure(new IllegalStateException("Oops..."));
codereview_new_java_data_8255
default StreamMessage<T> recoverAndResume( } /** - * Recovers failed {@link StreamMessage} for the specified {@link Throwable} and resumes by subscribing * to a returned fallback {@link StreamMessage} when any error occurs. * * <p>Example:<pre>{@code ```suggestion * Recovers a failed {@link StreamMessage} for the specified {@link Throwable} and resumes by subscribing ``` default StreamMessage<T> recoverAndResume( } /** + * Recovers a failed {@link StreamMessage} for the specified {@link Throwable} and resumes by subscribing * to a returned fallback {@link StreamMessage} when any error occurs. * * <p>Example:<pre>{@code
codereview_new_java_data_8256
default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> * // In this case, CompletionException is returned. (can't recover exception) * misMatchRecovered.aggregate().join(); * }</pre> - * */ @UnstableApi default <T extends Throwable> HttpResponse recover(Class<T> causeClass, Function<? super T, ? extends HttpResponse> function) { ```suggestion */ ``` default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> * // In this case, CompletionException is returned. (can't recover exception) * misMatchRecovered.aggregate().join(); * }</pre> + */ @UnstableApi default <T extends Throwable> HttpResponse recover(Class<T> causeClass, Function<? super T, ? extends HttpResponse> function) {
codereview_new_java_data_8257
default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> * * HttpResponse response = HttpResponse.ofFailure(new IllegalStateException("Oops...")); * // If the exception type does not match - * HttpResponse misMatchRecovered = * response.recover(IllegalArgumentException.class, cause -> HttpResponse.of("Fallback")); * // In this case, CompletionException is thrown. (can't recover exception) - * misMatchRecovered.aggregate().join(); * }</pre> */ @UnstableApi nit: ```suggestion * HttpResponse mismatchRecovered = ``` `mismatch` is a word. default HttpResponse recover(Function<? super Throwable, ? extends HttpResponse> * * HttpResponse response = HttpResponse.ofFailure(new IllegalStateException("Oops...")); * // If the exception type does not match + * HttpResponse mismatchRecovered = * response.recover(IllegalArgumentException.class, cause -> HttpResponse.of("Fallback")); * // In this case, CompletionException is thrown. (can't recover exception) + * mismatchRecovered.aggregate().join(); * }</pre> */ @UnstableApi
codereview_new_java_data_8258
default <T extends Throwable> HttpResponse recover(Class<T> causeClass, requireNonNull(causeClass, "causeClass"); requireNonNull(function, "function"); return recover(cause -> { - if (!causeClass.isAssignableFrom(cause.getClass())) { return Exceptions.throwUnsafely(cause); } try { Could be simplified into `!causeClass.isInstance(cause)`? default <T extends Throwable> HttpResponse recover(Class<T> causeClass, requireNonNull(causeClass, "causeClass"); requireNonNull(function, "function"); return recover(cause -> { + if (!causeClass.isInstance(cause)) { return Exceptions.throwUnsafely(cause); } try {
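A small self-contained sketch of why the two checks are interchangeable here: for any non-null `cause`, `causeClass.isAssignableFrom(cause.getClass())` and `causeClass.isInstance(cause)` agree, and `isInstance` additionally tolerates `null`.

```java
public class InstanceCheckDemo {
    public static void main(String[] args) {
        Throwable cause = new IllegalStateException("Oops...");
        Class<? extends Throwable> causeClass = RuntimeException.class;

        // Both true: IllegalStateException is a subtype of RuntimeException.
        System.out.println(causeClass.isAssignableFrom(cause.getClass())); // true
        System.out.println(causeClass.isInstance(cause));                  // true

        // isInstance is also null-safe, while cause.getClass() would throw an NPE.
        System.out.println(causeClass.isInstance(null)); // false
    }
}
```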
codereview_new_java_data_8259
public void testBinaryNode() throws IOException { mapper.writeTree(mapper.createGenerator(writer), new BinaryNode(expected)); - JsonNode binaryNode = mapper.readTree(writer.toString()); - assertTrue(binaryNode.isTextual(), binaryNode.toString()); - byte[] actual = MessageUtil.jsonNodeToBinary(binaryNode, "Test base64 JSON string"); assertArrayEquals(expected, actual); } @Test - public void testInvalidBineryNode() { assertThrows( IllegalArgumentException.class, () -> MessageUtil.jsonNodeToBinary(new IntNode(42), "Test int to binary") can you make the assertion failure message a little bit clearer? Just printing the node seems a bit cryptic. public void testBinaryNode() throws IOException { mapper.writeTree(mapper.createGenerator(writer), new BinaryNode(expected)); + JsonNode textNode = mapper.readTree(writer.toString()); + assertTrue(textNode.isTextual(), String.format("Expected a JSON string but was: %s", textNode.toString())); + byte[] actual = MessageUtil.jsonNodeToBinary(textNode, "Test base64 JSON string"); assertArrayEquals(expected, actual); } @Test + public void testInvalidBinaryNode() { assertThrows( IllegalArgumentException.class, () -> MessageUtil.jsonNodeToBinary(new IntNode(42), "Test int to binary")
codereview_new_java_data_8262
public void onMetadataUpdate( case SNAPSHOT: publishSnapshot(delta, newImage, (SnapshotManifest) manifest); break; - default: - break; } } wdyt about throwing an IllegalStateException in the `default`. I do that sometimes to future-proof against unhandled enums down the road. public void onMetadataUpdate( case SNAPSHOT: publishSnapshot(delta, newImage, (SnapshotManifest) manifest); break; } }
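A minimal sketch (hypothetical enum and handler) of the future-proofing pattern the reviewer mentions: throwing from `default` turns a forgotten `case` for a newly added constant into a loud failure instead of a silent no-op.

```java
enum ManifestType { LOG_DELTA, SNAPSHOT }

final class MetadataPublisher {
    void publish(ManifestType type) {
        switch (type) {
            case LOG_DELTA:
                // publish the log delta...
                break;
            case SNAPSHOT:
                // publish the snapshot...
                break;
            default:
                // If a new ManifestType is added later, this fails fast instead
                // of silently dropping the update.
                throw new IllegalStateException("Unhandled manifest type: " + type);
        }
    }
}
```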
codereview_new_java_data_8264
public enum LogStartOffsetIncrementReason { ClientRecordDeletion("client delete records request"), SnapshotGenerated("snapshot generated"); - private final String value; - LogStartOffsetIncrementReason(String value) { - this.value = value; } @Override public String toString() { - return value; } } nit: rename the `value` to a meaningful name, ex: `reason`? public enum LogStartOffsetIncrementReason { ClientRecordDeletion("client delete records request"), SnapshotGenerated("snapshot generated"); + private final String reason; + LogStartOffsetIncrementReason(String reason) { + this.reason = reason; } @Override public String toString() { + return reason; } }
codereview_new_java_data_8265
import java.util.Optional; /** - * Structure used for lower level reads using [[kafka.cluster.Partition.fetchRecords()]]. */ public class LogReadInfo { The java doc needs to be updated to java style with `@link` import java.util.Optional; /** + * Structure used for lower level reads using {@link kafka.cluster.Partition#fetchRecords()}. */ public class LogReadInfo {
codereview_new_java_data_8270
public void putAll(final List<KeyValue<Bytes, byte[]>> entries) { @Override public byte[] delete(final Bytes key) { - throw new UnsupportedOperationException("Versioned key-value stores do not support delete(key)"); } @Override Should we point to `delete(key, ts)` in the error message? public void putAll(final List<KeyValue<Bytes, byte[]>> entries) { @Override public byte[] delete(final Bytes key) { + throw new UnsupportedOperationException("Versioned key-value stores do not support delete(key). Use delete(key, timestamp) instead."); } @Override
codereview_new_java_data_8271
public void maybeSetThrottleTimeMs(int throttleTimeMs) { public Map<String, Map<TopicPartition, Errors>> errors() { Map<String, Map<TopicPartition, Errors>> errorsMap = new HashMap<>(); - errorsMap.put(V3_AND_BELOW_TXN_ID, errorsForTransaction(this.data.resultsByTopicV3AndBelow())); for (AddPartitionsToTxnResult result : this.data.resultsByTransaction()) { - String transactionalId = result.transactionalId(); - errorsMap.put(transactionalId, errorsForTransaction(data().resultsByTransaction().find(transactionalId).topicResults())); } return errorsMap; Can't you reuse `result` instead of calling `data().resultsByTransaction().find(transactionalId)`? public void maybeSetThrottleTimeMs(int throttleTimeMs) { public Map<String, Map<TopicPartition, Errors>> errors() { Map<String, Map<TopicPartition, Errors>> errorsMap = new HashMap<>(); + if (this.data.resultsByTopicV3AndBelow().size() != 0) { + errorsMap.put(V3_AND_BELOW_TXN_ID, errorsForTransaction(this.data.resultsByTopicV3AndBelow())); + } for (AddPartitionsToTxnResult result : this.data.resultsByTransaction()) { + errorsMap.put(result.transactionalId(), errorsForTransaction(result.topicResults())); } return errorsMap;
codereview_new_java_data_8272
public void testBatchedErrors() { assertEquals(txn1Errors, errorsForTransaction(response.getTransactionTopicResults("txn1"))); assertEquals(txn2Errors, errorsForTransaction(response.getTransactionTopicResults("txn2"))); } } nit: Should we add a test for `errors()`? public void testBatchedErrors() { assertEquals(txn1Errors, errorsForTransaction(response.getTransactionTopicResults("txn1"))); assertEquals(txn2Errors, errorsForTransaction(response.getTransactionTopicResults("txn2"))); + + Map<String, Map<TopicPartition, Errors>> expectedErrors = new HashMap<>(); + expectedErrors.put("txn1", txn1Errors); + expectedErrors.put("txn2", txn2Errors); + assertEquals(expectedErrors, response.errors()); } }
codereview_new_java_data_8275
public static class ConsumerPerfRebListener implements ConsumerRebalanceListener private long joinStartMs, joinTimeMsInSingleRound; public ConsumerPerfRebListener(AtomicLong joinGroupTimeMs, long joinStartMs, long joinTimeMsInSingleRound) { - super(); this.joinGroupTimeMs = joinGroupTimeMs; this.joinStartMs = joinStartMs; this.joinTimeMsInSingleRound = joinTimeMsInSingleRound; nit - not needed. public static class ConsumerPerfRebListener implements ConsumerRebalanceListener private long joinStartMs, joinTimeMsInSingleRound; public ConsumerPerfRebListener(AtomicLong joinGroupTimeMs, long joinStartMs, long joinTimeMsInSingleRound) { this.joinGroupTimeMs = joinGroupTimeMs; this.joinStartMs = joinStartMs; this.joinTimeMsInSingleRound = joinTimeMsInSingleRound;
codereview_new_java_data_8276
public KafkaRaftMetrics(Metrics metrics, String metricGrpPrefix, QuorumState sta new Rate(TimeUnit.SECONDS, new WindowedSum())); this.pollDurationSensor = metrics.sensor("poll-idle-ratio"); - this.pollDurationSensor.add(metrics.metricName( "poll-idle-ratio-avg", metricGroupName, "The ratio of time the Raft IO thread is idle as opposed to " + Minor but I would add a newline before `metrics.metricName`. public KafkaRaftMetrics(Metrics metrics, String metricGrpPrefix, QuorumState sta new Rate(TimeUnit.SECONDS, new WindowedSum())); this.pollDurationSensor = metrics.sensor("poll-idle-ratio"); + this.pollDurationSensor.add( + metrics.metricName( "poll-idle-ratio-avg", metricGroupName, "The ratio of time the Raft IO thread is idle as opposed to " +
codereview_new_java_data_8277
public class TimeRatio implements MeasurableStat { private final double defaultRatio; public TimeRatio(double defaultRatio) { this.defaultRatio = defaultRatio; } Should this check that `defaultRatio` is between `1.0` and `0.0`? public class TimeRatio implements MeasurableStat { private final double defaultRatio; public TimeRatio(double defaultRatio) { + if (defaultRatio < 0.0 || defaultRatio > 1.0) { + throw new IllegalArgumentException("Invalid ratio: value " + defaultRatio + " is not between 0 and 1."); + } + this.defaultRatio = defaultRatio; }
codereview_new_java_data_8278
public void shouldRecordPollIdleRatio() { raftMetrics.updatePollStart(time.milliseconds()); time.sleep(5); - // Measurement arrives before poll end assertEquals(0.6, getMetric(metrics, "poll-idle-ratio-avg").metricValue()); // More idle time for 5ms time.sleep(5); raftMetrics.updatePollEnd(time.milliseconds()); // The measurement includes the interval beginning at the last recording. - // This counts 10ms of busy time and 10ms of idle time. assertEquals(0.5, getMetric(metrics, "poll-idle-ratio-avg").metricValue()); } How about documenting this information: busy of 10ms and idle of 5ms + 5ms? public void shouldRecordPollIdleRatio() { raftMetrics.updatePollStart(time.milliseconds()); time.sleep(5); + // Measurement arrives before poll end, so we have 40ms busy time and 60ms idle. + // The subsequent interval time is not counted until the next measurement. assertEquals(0.6, getMetric(metrics, "poll-idle-ratio-avg").metricValue()); // More idle time for 5ms time.sleep(5); raftMetrics.updatePollEnd(time.milliseconds()); // The measurement includes the interval beginning at the last recording. + // This counts 10ms of busy time and 5ms + 5ms = 10ms of idle time. assertEquals(0.5, getMetric(metrics, "poll-idle-ratio-avg").metricValue()); }
codereview_new_java_data_8279
import java.util.Optional; public class ReplicaAlterLogDirsTierStateMachine implements TierStateMachine { public PartitionFetchState start(TopicPartition topicPartition, PartitionFetchState currentFetchState, FetchRequest.PartitionData fetchPartitionData) throws Exception { // JBOD is not supported with tiered storage. - throw new UnsupportedOperationException("Building remote log aux state not supported in ReplicaAlterLogDirsThread."); } public Optional<PartitionFetchState> maybeAdvanceState(TopicPartition topicPartition, nit: add Java Docs import java.util.Optional; +/** + The replica alter log dirs tier state machine is unsupported but is provided to the ReplicaAlterLogDirsThread. + */ public class ReplicaAlterLogDirsTierStateMachine implements TierStateMachine { public PartitionFetchState start(TopicPartition topicPartition, PartitionFetchState currentFetchState, FetchRequest.PartitionData fetchPartitionData) throws Exception { // JBOD is not supported with tiered storage. + throw new UnsupportedOperationException("Building remote log aux state is not supported in ReplicaAlterLogDirsThread."); } public Optional<PartitionFetchState> maybeAdvanceState(TopicPartition topicPartition,
codereview_new_java_data_8282
public AssignmentSpec( ) { Objects.requireNonNull(members); Objects.requireNonNull(topics); - this.members = members; this.topics = topics; } extremely small nit: any reason why we include a newline here but not on the other specs public AssignmentSpec( ) { Objects.requireNonNull(members); Objects.requireNonNull(topics); this.members = members; this.topics = topics; }
codereview_new_java_data_8283
public void testTopicCreateWhenTopicExists() { workerTask.sendRecords(); verifySendRecord(2); } @Test We need to make sure that, under these circumstances, we never tried to create a topic. One way to accomplish this is to verify that we never used the `TopicAdmin` for anything except the call to `describeTopics`: ```suggestion verifySendRecord(2); // Make sure we didn't try to create the topic after finding out it already existed verifyNoMoreInteractions(admin); ``` public void testTopicCreateWhenTopicExists() { workerTask.sendRecords(); verifySendRecord(2); + // Make sure we didn't try to create the topic after finding out it already existed + verifyNoMoreInteractions(admin); } @Test
codereview_new_java_data_8284
boolean joinGroupIfNeeded(final Timer timer) { else if (!future.isRetriable()) throw exception; - // Timer check upon retrying the RetriableExceptions if (timer.isExpired()) { return false; } the previous logic was reverted with some autocorrection to the indentation. boolean joinGroupIfNeeded(final Timer timer) { else if (!future.isRetriable()) throw exception; + // We need to return upon expired timer, in case if the client.poll returns immediately and the time + // has elapsed. if (timer.isExpired()) { return false; }
codereview_new_java_data_8285
*/ package org.apache.kafka.streams.state; -import org.apache.kafka.streams.KeyValue; - import java.util.Objects; /** - * Combines a value from a {@link KeyValue} with a timestamp, for use as the return type * from {@link VersionedKeyValueStore#get(Object, long)} and related methods. * * @param <V> The value type Well, we don't know how `VersionedRecord` is really used, do we? So the reference to `KeyValue` is a little odd? */ package org.apache.kafka.streams.state; import java.util.Objects; /** + * Combines a value (from a key-value record) with a timestamp, for use as the return type * from {@link VersionedKeyValueStore#get(Object, long)} and related methods. * * @param <V> The value type
codereview_new_java_data_8286
public R apply(R record) { @Override public void close() { - Utils.closeQuietly(delegate, "predicated"); Utils.closeQuietly(predicate, "predicate"); } @Override public String toString() { - return "PredicatedTransformation{" + "predicate=" + predicate + ", delegate=" + delegate + ", negate=" + negate + ```suggestion Utils.closeQuietly(delegate, "transformation"); ``` public R apply(R record) { @Override public void close() { + Utils.closeQuietly(delegate, "transformation"); Utils.closeQuietly(predicate, "predicate"); } @Override public String toString() { + return "TransformationStage{" + "predicate=" + predicate + ", delegate=" + delegate + ", negate=" + negate +
codereview_new_java_data_8287
private void applyAndAssert(boolean predicateResult, boolean negate, Predicate<SourceRecord> predicate = mock(Predicate.class); when(predicate.test(any())).thenReturn(predicateResult); @SuppressWarnings("unchecked") - Transformation<SourceRecord> predicatedTransform = mock(Transformation.class); - when(predicatedTransform.apply(any())).thenReturn(transformed); - TransformationStage<SourceRecord> pt = new TransformationStage<>( predicate, negate, - predicatedTransform); - assertEquals(expectedResult, pt.apply(initial)); - pt.close(); verify(predicate).close(); - verify(predicatedTransform).close(); } } ```suggestion TransformationStage<SourceRecord> transformationStage = new TransformationStage<>( ``` nit private void applyAndAssert(boolean predicateResult, boolean negate, Predicate<SourceRecord> predicate = mock(Predicate.class); when(predicate.test(any())).thenReturn(predicateResult); @SuppressWarnings("unchecked") + Transformation<SourceRecord> transformation = mock(Transformation.class); + when(transformation.apply(any())).thenReturn(transformed); + TransformationStage<SourceRecord> stage = new TransformationStage<>( predicate, negate, + transformation); + assertEquals(expectedResult, stage.apply(initial)); + stage.close(); verify(predicate).close(); + verify(transformation).close(); } }
codereview_new_java_data_8288
public boolean includeRecordDetailsInErrorLog() { } /** - * Returns the initialized list of {@link TransformationStage} which are specified in {@link #TRANSFORMS_CONFIG}. */ public <R extends ConnectRecord<R>> List<TransformationStage<R>> transformationStages() { final List<String> transformAliases = getList(TRANSFORMS_CONFIG); This isn't strictly correct anymore; users don't specify `TransformationStage`s in connector configs, they specify `Transformation`s. public boolean includeRecordDetailsInErrorLog() { } /** + * Returns the initialized list of {@link TransformationStage} which apply the + * {@link Transformation transformations} and {@link Predicate predicates} + * as they are specified in the {@link #TRANSFORMS_CONFIG} and {@link #PREDICATES_CONFIG} */ public <R extends ConnectRecord<R>> List<TransformationStage<R>> transformationStages() { final List<String> transformAliases = getList(TRANSFORMS_CONFIG);
codereview_new_java_data_8289
public void run() throws Exception { propagator.sendRPCsToBrokersFromMetadataDelta(delta, image, migrationLeadershipState.zkControllerEpoch()); } else { - log.trace("Not sending RPCs to brokers for metadata {} since no relevant metadata changes", metadataType); } } else { log.info("Ignoring {} {} which contains metadata that has already been written to ZK.", metadataType, provenance); nitpick: perhaps "since no relevant metadata has changed" public void run() throws Exception { propagator.sendRPCsToBrokersFromMetadataDelta(delta, image, migrationLeadershipState.zkControllerEpoch()); } else { + log.trace("Not sending RPCs to brokers for metadata {} since no relevant metadata has changed", metadataType); } } else { log.info("Ignoring {} {} which contains metadata that has already been written to ZK.", metadataType, provenance);
codereview_new_java_data_8290
/** * Fake plugin class for testing classloading isolation. * See {@link org.apache.kafka.connect.runtime.isolation.TestPlugins}. - * <p>Defines a connector as an non-static inner class, which does not have a default constructor. */ public class OuterClass { ```suggestion * <p>Defines a connector as a non-static inner class, which does not have a default constructor. ``` ```suggestion * <p>Defines a connector as a non-static inner class, which does not have a default constructor. ``` /** * Fake plugin class for testing classloading isolation. * See {@link org.apache.kafka.connect.runtime.isolation.TestPlugins}. + * <p>Defines a connector as a non-static inner class, which does not have a default constructor. */ public class OuterClass {
codereview_new_java_data_8291
/** * Fake plugin class for testing classloading isolation. * See {@link org.apache.kafka.connect.runtime.isolation.TestPlugins}. - * This is a plugin co-located with other poorly packaged plugins, but should be visible despite other errors. */ public class CoLocatedPlugin implements Converter { ```suggestion * <p>This is a plugin co-located with other poorly packaged plugins, but should be visible despite other errors. ``` /** * Fake plugin class for testing classloading isolation. * See {@link org.apache.kafka.connect.runtime.isolation.TestPlugins}. + * <p>This is a plugin co-located with other poorly packaged plugins, but should be visible despite other errors. */ public class CoLocatedPlugin implements Converter {
codereview_new_java_data_8292
public Map<String, Object> originals(Map<String, Object> configOverrides) { */ public Map<String, String> originalsStrings() { Map<String, String> copy = new RecordingMap<>(); - copyAsStrings(originals, copy); - return copy; - } - - /** - * Ensures that all values of a map are strings, and copies them to another map. - * @param originals The map to validate. - * @param copy The target to copy to. - */ - protected static void copyAsStrings(Map<String, ?> originals, Map<String, String> copy) { for (Map.Entry<String, ?> entry : originals.entrySet()) { if (!(entry.getValue() instanceof String)) throw new ClassCastException("Non-string value found in original settings for key " + entry.getKey() + ": " + (entry.getValue() == null ? null : entry.getValue().getClass().getName())); copy.put(entry.getKey(), (String) entry.getValue()); } } /** This counts as a change to public interface since subclasses of `AbstractConfig` would be able to access this new method. And since this wasn't mentioned in the KIP, we probably shouldn't do that here. public Map<String, Object> originals(Map<String, Object> configOverrides) { */ public Map<String, String> originalsStrings() { Map<String, String> copy = new RecordingMap<>(); for (Map.Entry<String, ?> entry : originals.entrySet()) { if (!(entry.getValue() instanceof String)) throw new ClassCastException("Non-string value found in original settings for key " + entry.getKey() + ": " + (entry.getValue() == null ? null : entry.getValue().getClass().getName())); copy.put(entry.getKey(), (String) entry.getValue()); } + return copy; } /**
codereview_new_java_data_8293
private void maybeThrowCancellationException(Throwable cause) { * Waits if necessary for this future to complete, and then returns its result. */ @Override - public abstract T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { try { return completableFuture.get(); } catch (ExecutionException e) { Apologies if I misread, why not use the method `get` with timeout line 177? private void maybeThrowCancellationException(Throwable cause) { * Waits if necessary for this future to complete, and then returns its result. */ @Override + public T get() throws InterruptedException, ExecutionException { try { return completableFuture.get(); } catch (ExecutionException e) {
codereview_new_java_data_8294
public Properties configProperties(ConfigResource configResource) { } } - public Map<String, String> configMap(ConfigResource configResource) { ConfigurationImage configurationImage = data.get(configResource); if (configurationImage != null) { return configurationImage.toMap(); It would be good to add JavaDoc here stating that this doesn't handle configuration overrides. Also, how about `configMapForResource` as a name? public Properties configProperties(ConfigResource configResource) { } } + /** + * Return the underlying config data for a given resource as an immutable map. This does not apply + * configuration overrides or include entity defaults for the resource type. + */ + public Map<String, String> configMapForResource(ConfigResource configResource) { ConfigurationImage configurationImage = data.get(configResource); if (configurationImage != null) { return configurationImage.toMap();
codereview_new_java_data_8296
protected void stopServices() { this.configBackingStore.stop(); this.worker.stop(); this.connectorExecutor.shutdown(); - try { - this.connectorClientConfigOverridePolicy.close(); - } catch (Exception e) { - log.warn("Exception while stop connectorClientConfigOverridePolicy:", e); - } } @Override This can be a one-liner with the `Utils` class: ```suggestion Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy"); ``` protected void stopServices() { this.configBackingStore.stop(); this.worker.stop(); this.connectorExecutor.shutdown(); + Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy"); } @Override
codereview_new_java_data_8297
public KeyValue<Bytes, byte[]> makeNext() { @Override public synchronized void close() { - if (closeCallback != null) { - closeCallback.run(); } iterNoTimestamp.close(); iterWithTimestamp.close(); open = false; Don't we require that we always have a registered `closeCallback`? If yes, it seems invalid that it's `null` and having this check might mask a bug, and thus we should rather not have it, but let it crash with a NPE? (Also elsewhere.) public KeyValue<Bytes, byte[]> makeNext() { @Override public synchronized void close() { + if (closeCallback == null) { + throw new IllegalStateException("RocksDBDualCFIterator expects close callback to be set immediately upon creation"); } + closeCallback.run(); + iterNoTimestamp.close(); iterWithTimestamp.close(); open = false;
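One hedged alternative to the fail-fast check above (hypothetical holder class, not the store's actual design): validate the callback when it is registered, so a missing callback surfaces at construction time rather than at `close()`.

```java
import java.util.Objects;

final class CloseCallbackHolder {
    private final Runnable closeCallback;

    // Rejecting null here means close() can invoke the callback unconditionally.
    CloseCallbackHolder(Runnable closeCallback) {
        this.closeCallback = Objects.requireNonNull(closeCallback,
                "close callback must be registered immediately upon creation");
    }

    void close() {
        closeCallback.run();
    }
}
```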
codereview_new_java_data_8299
private void trySend(final long currentTimeMs) { if (unsent.timer.isExpired()) { iterator.remove(); unsent.callback.onFailure(new TimeoutException( - "Failed to send request after " + unsent.timer.timeoutMs() + " " + "ms.")); continue; } maybe ` "Failed to send request after " + unsent.timer.timeoutMs() + " ms."` I'm not sure why I added an extra `+ " "` private void trySend(final long currentTimeMs) { if (unsent.timer.isExpired()) { iterator.remove(); unsent.callback.onFailure(new TimeoutException( + "Failed to send request after " + unsent.timer.timeoutMs() + " ms.")); continue; }
codereview_new_java_data_8300
public class DistributedHerder extends AbstractHerder implements Runnable { * @param uponShutdown any {@link AutoCloseable} objects that should be closed when this herder is {@link #stop() stopped}, * after all services and resources owned by this herder are stopped */ - // TODO: Do we really need two separate public constructors? public DistributedHerder(DistributedConfig config, Time time, Worker worker, interesting, the for-testing constructor appears unused, and has been for as long as it's existed (ever since #321). We can push this refactor out to a different PR. public class DistributedHerder extends AbstractHerder implements Runnable { * @param uponShutdown any {@link AutoCloseable} objects that should be closed when this herder is {@link #stop() stopped}, * after all services and resources owned by this herder are stopped */ public DistributedHerder(DistributedConfig config, Time time, Worker worker,
codereview_new_java_data_8301
public class MirrorMakerConfig extends AbstractConfig { static final String TARGET_PREFIX = "target."; static final String ENABLE_INTERNAL_REST_CONFIG = "dedicated.mode.enable.internal.rest"; private static final String ENABLE_INTERNAL_REST_DOC = - "Whether to bring up an internal-only REST server that allows multi-node clusters to operate correctly"; private final Plugins plugins; Nit: We usually have a period at the end of descriptions. public class MirrorMakerConfig extends AbstractConfig { static final String TARGET_PREFIX = "target."; static final String ENABLE_INTERNAL_REST_CONFIG = "dedicated.mode.enable.internal.rest"; private static final String ENABLE_INTERNAL_REST_DOC = + "Whether to bring up an internal-only REST server that allows multi-node clusters to operate correctly."; private final Plugins plugins;
codereview_new_java_data_8302
public void initializeServer() { protected final void initializeResources() { log.info("Initializing REST resources"); - this.resources = new ArrayList<>(); ResourceConfig resourceConfig = new ResourceConfig(); resourceConfig.register(new JacksonJsonProvider()); nit: slightly more readable without `this.` here public void initializeServer() { protected final void initializeResources() { log.info("Initializing REST resources"); + resources = new ArrayList<>(); ResourceConfig resourceConfig = new ResourceConfig(); resourceConfig.register(new JacksonJsonProvider());
codereview_new_java_data_8304
public void afterEach() { public void kafkaVersion() { String out = executeAndGetOut("--version"); assertNormalExit(); - assertEquals(AppInfoParser.getVersion(), out); } @Test This fails when running in Intellij. The output is: ``` [2023-02-01 16:49:33,671] WARN Error while loading kafka-version.properties: inStream parameter is null (org.apache.kafka.common.utils.AppInfoParser:46) unknown ``` I guess all we could simply do `assertTrue(out.contains(AppInfoParser.getVersion()));` public void afterEach() { public void kafkaVersion() { String out = executeAndGetOut("--version"); assertNormalExit(); + assertTrue(out.contains(AppInfoParser.getVersion())); } @Test
codereview_new_java_data_8306
public void reconfigure(Map<String, String> props) { /** * Validate the connector configuration values against configuration definitions. * @param connectorConfigs the provided configuration values - * @return {@link Config}, essentially a list of {@link ConfigValue}s containing the updated configuration - * information given the current configuration values. */ public Config validate(Map<String, String> connectorConfigs) { ConfigDef configDef = config(); ```suggestion * @return a parsed and validated {@link Config} containing any relevant validation errors with the raw * {@code connectorConfigs} which should prevent this configuration from being used. ``` public void reconfigure(Map<String, String> props) { /** * Validate the connector configuration values against configuration definitions. * @param connectorConfigs the provided configuration values + * @return a parsed and validated {@link Config} containing any relevant validation errors with the raw + * {@code connectorConfigs} which should prevent this configuration from being used. */ public Config validate(Map<String, String> connectorConfigs) { ConfigDef configDef = config();
codereview_new_java_data_8307
public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoC /** - * Workers will invoke this while constructing producer for SourceConnectors, DLQs for SinkConnectors and - * consumers for SinkConnectors to validate if all of the overridden client configurations are allowed per the - * policy implementation. This would also be invoked during the validation of connector configs via the REST API. * <p> * If there are any policy violations, the connector will not be started. * We probably don't need to enumerate all of the use-cases for kafka clients here, and can keep that scoped to the ConnectorClientConfigRequest javadoc. ```suggestion * Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer * client instances to validate if all of the overridden client configurations are allowed per the ``` public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoC /** + * Workers will invoke this before configuring per-connector Kafka admin, producer, and consumer client instances + * to validate if all the overridden client configurations are allowed per the policy implementation. + * This would also be invoked during the validation of connector configs via the REST API. * <p> * If there are any policy violations, the connector will not be started. *
codereview_new_java_data_8308
public void initialize(SinkTaskContext context) { /** * Put the records in the sink. This should either write them to the downstream system or batch them for - * later writing * <p> * If this operation fails, the SinkTask may throw a {@link org.apache.kafka.connect.errors.RetriableException} to * indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to I agree that it's not necessary for this javadoc to prescribe asynchronous behavior, but it should certainly point out the pitfall that asynchronous users need to take special care. ```suggestion * Put the records in the sink. If this method returns before the records are durably written, * the task must implement {@link #flush(Map)} or {@link #preCommit(Map)} to ensure that * only durably written record offsets are committed, and that no records are dropped during failures. ``` public void initialize(SinkTaskContext context) { /** * Put the records in the sink. This should either write them to the downstream system or batch them for + * later writing. If this method returns before the records are written to the downstream system, the task must + * implement {@link #flush(Map)} or {@link #preCommit(Map)} to ensure that offsets are only committed for records + * that have been written to the downstream system (hence avoiding data loss during failures). * <p> * If this operation fails, the SinkTask may throw a {@link org.apache.kafka.connect.errors.RetriableException} to * indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to
codereview_new_java_data_8312
public void testCreateTopicsWithConfigs() throws Exception { assertEquals(TopicRecord.class, result4.records().get(0).message().getClass()); TopicRecord batchedTopic1Record = (TopicRecord) result4.records().get(0).message(); assertEquals(batchedTopic1, batchedTopic1Record.name()); - assertEquals(ConfigRecord.class, result4.records().get(1).message().getClass()); - assertEquals(batchedTopic1, ((ConfigRecord) result4.records().get(1).message()).resourceName()); assertEquals(PartitionRecord.class, result4.records().get(2).message().getClass()); assertEquals(batchedTopic1Record.topicId(), ((PartitionRecord) result4.records().get(2).message()).topicId()); } Wonder if it would be better to assert the full ConfigRecord instead of just checking one field? public void testCreateTopicsWithConfigs() throws Exception { assertEquals(TopicRecord.class, result4.records().get(0).message().getClass()); TopicRecord batchedTopic1Record = (TopicRecord) result4.records().get(0).message(); assertEquals(batchedTopic1, batchedTopic1Record.name()); + assertEquals(new ConfigRecord() + .setResourceName(batchedTopic1) + .setResourceType(ConfigResource.Type.TOPIC.id()) + .setName("foo") + .setValue("notNull"), + result4.records().get(1).message()); assertEquals(PartitionRecord.class, result4.records().get(2).message().getClass()); assertEquals(batchedTopic1Record.topicId(), ((PartitionRecord) result4.records().get(2).message()).topicId()); }
codereview_new_java_data_8313
public void testCompatibilityWithClusterId() throws IOException { // We initialized a state from the metadata log assertTrue(stateFile.exists()); String jsonString = "{\"clusterId\":\"abc\",\"leaderId\":0,\"leaderEpoch\":0,\"votedId\":-1,\"appliedOffset\":0,\"currentVoters\":[],\"data_version\":0}"; writeToStateFile(stateFile, jsonString); // verify that we can read the state file that contains the removed "cluserId" field. - assertEquals(stateStore.readElectionState(), new ElectionState(0, - OptionalInt.of(0), OptionalInt.empty(), Collections.emptySet())); stateStore.clear(); assertFalse(stateFile.exists()); 1. we should put the expected result in the first parameter, so that the failed output will be meaningful. (ref: [here](https://junit.org/junit5/docs/5.0.1/api/org/junit/jupiter/api/Assertions.html#assertEquals-java.lang.Object-java.lang.Object-)) 2. I saw we use `ElectionState.withElectedLeader`, instead of `new ElectionState` in other tests. Do you think we can use the former one for consistency? public void testCompatibilityWithClusterId() throws IOException { // We initialized a state from the metadata log assertTrue(stateFile.exists()); + String jsonString = "{\"clusterId\":\"abc\",\"leaderId\":0,\"leaderEpoch\":0,\"votedId\":-1,\"appliedOffset\":0,\"currentVoters\":[],\"data_version\":0}"; writeToStateFile(stateFile, jsonString); // verify that we can read the state file that contains the removed "cluserId" field. + assertEquals(ElectionState.withElectedLeader(0, 0, Collections.emptySet()), stateStore.readElectionState()); stateStore.clear(); assertFalse(stateFile.exists());
codereview_new_java_data_8314
public void add(TopicPartition topicPartition, PartitionData data) { } } - /** - * Build a FetchRequestData for the provided partitions - * @throws IllegalStateException if it has already been called - */ public FetchRequestData build() { - if (next == null) { - throw new IllegalStateException("build() has already been called."); - } boolean canUseTopicIds = partitionsWithoutTopicIds == 0; if (nextMetadata.isFull()) { This is not a test change? public void add(TopicPartition topicPartition, PartitionData data) { } } public FetchRequestData build() { boolean canUseTopicIds = partitionsWithoutTopicIds == 0; if (nextMetadata.isFull()) {
codereview_new_java_data_8315
public boolean isCancelled() { public boolean isDone() { return true; } @Override public Void get() { return null; add a newline above. public boolean isCancelled() { public boolean isDone() { return true; } + @Override public Void get() { return null;
codereview_new_java_data_8317
import org.apache.kafka.common.utils.FetchRequestUtils; public enum FetchIsolation { - FETCH_LOG_END, - FETCH_HIGH_WATERMARK, - FETCH_TXN_COMMITTED; public static FetchIsolation apply(FetchRequest request) { return apply(request.replicaId(), request.isolationLevel()); } public static FetchIsolation apply(int replicaId, IsolationLevel isolationLevel) { if (!FetchRequestUtils.isConsumer(replicaId)) { - return FETCH_LOG_END; } else if (isolationLevel == IsolationLevel.READ_COMMITTED) { - return FETCH_TXN_COMMITTED; } else { - return FETCH_HIGH_WATERMARK; } } } We can probably remove the `FETCH` prefix. import org.apache.kafka.common.utils.FetchRequestUtils; public enum FetchIsolation { + LOG_END, + HIGH_WATERMARK, + TXN_COMMITTED; public static FetchIsolation apply(FetchRequest request) { return apply(request.replicaId(), request.isolationLevel()); } public static FetchIsolation apply(int replicaId, IsolationLevel isolationLevel) { if (!FetchRequestUtils.isConsumer(replicaId)) { + return LOG_END; } else if (isolationLevel == IsolationLevel.READ_COMMITTED) { + return TXN_COMMITTED; } else { + return HIGH_WATERMARK; } } }
codereview_new_java_data_8319
static ProcessorSupplier<Object, Object, Void, Void> printProcessorSupplier(fina @Override public void init(final ProcessorContext<Void, Void> context) { super.init(context); - System.out.println("[3.2] initializing processor: topic=" + topic + " taskId=" + context.taskId()); System.out.flush(); numRecordsProcessed = 0; smallestOffset = Long.MAX_VALUE; ```suggestion System.out.println("[3.3] initializing processor: topic=" + topic + " taskId=" + context.taskId()); ``` static ProcessorSupplier<Object, Object, Void, Void> printProcessorSupplier(fina @Override public void init(final ProcessorContext<Void, Void> context) { super.init(context); + System.out.println("[3.3] initializing processor: topic=" + topic + " taskId=" + context.taskId()); System.out.flush(); numRecordsProcessed = 0; smallestOffset = Long.MAX_VALUE;
codereview_new_java_data_8323
public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { - String pkg; - if (klass.getPackage() == null) { - pkg = ""; - } else { - pkg = klass.getPackage().getName(); - } String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); } ternary operator would look great here public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { + String pkg = klass.getPackage() == null ? "" : klass.getPackage().getName(); String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); }
codereview_new_java_data_8324
public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { - String pkg; - if (klass.getPackage() == null) { - pkg = ""; - } else { - pkg = klass.getPackage().getName(); - } String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); } Can we please file a JIRA for removing this? I don't think it's required if we don't pass the class from Scala objects (versus classes). public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { + String pkg = klass.getPackage() == null ? "" : klass.getPackage().getName(); String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); }
codereview_new_java_data_8325
public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { - String pkg; - if (klass.getPackage() == null) { - pkg = ""; - } else { - pkg = klass.getPackage().getName(); - } String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); } We don't use `final` for parameters and local variables in Kafka (the stream modules are an exception). public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { + String pkg = klass.getPackage() == null ? "" : klass.getPackage().getName(); String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); }
codereview_new_java_data_8326
public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { - String pkg; - if (klass.getPackage() == null) { - pkg = ""; - } else { - pkg = klass.getPackage().getName(); - } String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); } Should this be a static method? Does it rely on any class state? public KafkaMetricsGroup(Class<?> klass) { * @return Sanitized metric name object. */ public MetricName metricName(String name, Map<String, String> tags) { + String pkg = klass.getPackage() == null ? "" : klass.getPackage().getName(); String simpleName = klass.getSimpleName().replaceAll("\\$$", ""); return explicitMetricName(pkg, simpleName, name, tags); }
codereview_new_java_data_8327
Optional<Checkpoint> checkpoint(String group, TopicPartition topicPartition, } } return Optional.empty(); - } SourceRecord checkpointRecord(Checkpoint checkpoint, long timestamp) { nit: Can you remove this unnecessary line? Optional<Checkpoint> checkpoint(String group, TopicPartition topicPartition, } } return Optional.empty(); } SourceRecord checkpointRecord(Checkpoint checkpoint, long timestamp) {
codereview_new_java_data_8328
public void testWithClassLoader() { assertNotEquals(dummyClassLoader, Thread.currentThread().getContextClassLoader()); } - private class DummyClassLoader extends ClassLoader { } } This should be a static class. public void testWithClassLoader() { assertNotEquals(dummyClassLoader, Thread.currentThread().getContextClassLoader()); } + private static class DummyClassLoader extends ClassLoader { } }
codereview_new_java_data_8330
* -----checkpoint file end---------- */ public class LeaderEpochCheckpointFile implements LeaderEpochCheckpoint { private static final String LEADER_EPOCH_CHECKPOINT_FILENAME = "leader-epoch-checkpoint"; private static final Pattern WHITE_SPACES_PATTERN = Pattern.compile("\\s+"); private static final int CURRENT_VERSION = 0; - public static final Formatter FORMATTER = new Formatter(); - private final CheckpointFileWithFailureHandler<EpochEntry> checkpoint; public LeaderEpochCheckpointFile(File file, LogDirFailureChannel logDirFailureChannel) throws IOException { Please have the public field(s) before the private ones. * -----checkpoint file end---------- */ public class LeaderEpochCheckpointFile implements LeaderEpochCheckpoint { + + public static final Formatter FORMATTER = new Formatter(); + private static final String LEADER_EPOCH_CHECKPOINT_FILENAME = "leader-epoch-checkpoint"; private static final Pattern WHITE_SPACES_PATTERN = Pattern.compile("\\s+"); private static final int CURRENT_VERSION = 0; private final CheckpointFileWithFailureHandler<EpochEntry> checkpoint; public LeaderEpochCheckpointFile(File file, LogDirFailureChannel logDirFailureChannel) throws IOException {
codereview_new_java_data_8334
public class ProducerStateEntry { public OptionalLong currentTxnFirstOffset; public ProducerStateEntry(long producerId) { - this(producerId, null, RecordBatch.NO_PRODUCER_EPOCH, -1, RecordBatch.NO_TIMESTAMP, OptionalLong.empty()); } - public ProducerStateEntry(long producerId, short producerEpoch, int coordinatorEpoch, long lastTimestamp, OptionalLong currentTxnFirstOffset) { - this(producerId, null, producerEpoch, coordinatorEpoch, lastTimestamp, currentTxnFirstOffset); - } - - public ProducerStateEntry(long producerId, BatchMetadata firstBatchMetadata, short producerEpoch, int coordinatorEpoch, long lastTimestamp, OptionalLong currentTxnFirstOffset) { this.producerId = producerId; this.producerEpoch = producerEpoch; this.coordinatorEpoch = coordinatorEpoch; this.lastTimestamp = lastTimestamp; this.currentTxnFirstOffset = currentTxnFirstOffset; - if (firstBatchMetadata != null) batchMetadata.add(firstBatchMetadata); } public int firstSeq() { I think we should move the optional `firstBatchMetadata` to the end of the parameter list. We should also consider whether we need the two constructor overloads or if we can simply make it `Optional<BatchMetadata>`. Looking at the number of constructor invocations, I would go with the latter. public class ProducerStateEntry { public OptionalLong currentTxnFirstOffset; public ProducerStateEntry(long producerId) { + this(producerId, RecordBatch.NO_PRODUCER_EPOCH, -1, RecordBatch.NO_TIMESTAMP, OptionalLong.empty(), Optional.empty()); } + public ProducerStateEntry(long producerId, short producerEpoch, int coordinatorEpoch, long lastTimestamp, OptionalLong currentTxnFirstOffset, Optional<BatchMetadata> firstBatchMetadata) { this.producerId = producerId; this.producerEpoch = producerEpoch; this.coordinatorEpoch = coordinatorEpoch; this.lastTimestamp = lastTimestamp; this.currentTxnFirstOffset = currentTxnFirstOffset; + firstBatchMetadata.ifPresent(batchMetadata::add); } public int firstSeq() {
codereview_new_java_data_8339
public void maybeAddOfflineLogDir(String logDir, String msg, IOException e) { * The method will wait if necessary until a new offline log directory becomes available * * @return The next offline log dir. */ - public String takeNextOfflineLogDir() { - try { - return offlineLogDirQueue.take(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } } } Why are we changing the behavior here? I think it would be better to throw `InterruptedException` as it did before. The callers are Scala code and should continue to work as before. As we convert the upper layers, we'll have to reconsider the exception handling, but it's simpler to leave things as they are now. public void maybeAddOfflineLogDir(String logDir, String msg, IOException e) { * The method will wait if necessary until a new offline log directory becomes available * * @return The next offline log dir. + * @throws InterruptedException */ + public String takeNextOfflineLogDir() throws InterruptedException { + return offlineLogDirQueue.take(); } }
codereview_new_java_data_8344
private boolean onSameSegment(LogOffsetMetadata that) { // if they are on the same segment and this offset precedes the given offset public int positionDiff(LogOffsetMetadata that) { if (messageOffsetOnly()) - throw new KafkaException(this + " cannot compare its segment info with " + that + " since it only has message offset info"); if (!onSameSegment(that)) throw new KafkaException(this + " cannot compare its segment position with " + that + " since they are not on the same segment"); "cannot compare its segment info" should be "cannot compare its segment position" (that was the difference between the message in `positionDiff` versus the one in `onSameSegment`). private boolean onSameSegment(LogOffsetMetadata that) { // if they are on the same segment and this offset precedes the given offset public int positionDiff(LogOffsetMetadata that) { if (messageOffsetOnly()) + throw new KafkaException(this + " cannot compare its segment position with " + that + " since it only has message offset info"); if (!onSameSegment(that)) throw new KafkaException(this + " cannot compare its segment position with " + that + " since they are not on the same segment");
codereview_new_java_data_8345
public void writeUnsignedVarint(int i) { public void writeByteBuffer(ByteBuffer buf) { try { if (buf.hasArray()) { - out.write(buf.array(), buf.arrayOffset(), buf.limit()); } else { byte[] bytes = Utils.toArray(buf); out.write(bytes); Should it be `buf.position() + buf.arrayOffset()`? > If this buffer is backed by an array then buffer position p corresponds to array index p + arrayOffset(). https://docs.oracle.com/javase/8/docs/api/java/nio/Buffer.html#arrayOffset-- public void writeUnsignedVarint(int i) { public void writeByteBuffer(ByteBuffer buf) { try { if (buf.hasArray()) { + out.write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining()); } else { byte[] bytes = Utils.toArray(buf); out.write(bytes);
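A small self-contained demonstration of the off-by-offset bug discussed above: for a buffer view whose position is nonzero, the correct array window starts at `arrayOffset() + position()` and spans `remaining()` bytes, not `limit()`.

```java
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;

public class BufferWriteDemo {
    public static void main(String[] args) {
        byte[] backing = {0, 1, 2, 3, 4, 5, 6, 7};
        // View over indices 2..5: position() == 2, limit() == 6, arrayOffset() == 0.
        // (slice() can additionally make arrayOffset() itself nonzero.)
        ByteBuffer buf = ByteBuffer.wrap(backing, 2, 4);

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Correct: writes the 4 visible bytes {2, 3, 4, 5}.
        out.write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
        // The buggy form write(buf.array(), buf.arrayOffset(), buf.limit())
        // would have written {0, 1, 2, 3, 4, 5} instead.
        System.out.println(out.size()); // 4
    }
}
```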
codereview_new_java_data_8346
Map<TaskId, Task> allTasks() { } } Map<TaskId, Task> notPausedTasks() { return Collections.unmodifiableMap(tasks.allTasks() .stream() I could not find a test for this change. Could you add one? Map<TaskId, Task> allTasks() { } } + /** + * Returns tasks owned by the stream thread. With state updater disabled, these are all tasks. With + * state updater enabled, this does not return any tasks currently owned by the state updater. + * @return + */ + Map<TaskId, Task> allOwnedTasks() { + // not bothering with an unmodifiable map, since the tasks themselves are mutable, but + // if any outside code modifies the map or the tasks, it would be a severe transgression. + return tasks.allTasksPerId(); + } + Map<TaskId, Task> notPausedTasks() { return Collections.unmodifiableMap(tasks.allTasks() .stream()
codereview_new_java_data_8347
public void clearTaskTimeout() { @Override public boolean commitNeeded() { - return task.commitNeeded(); } @Override Why do we need to change these two functions? public void clearTaskTimeout() { @Override public boolean commitNeeded() { + throw new UnsupportedOperationException("This task is read-only"); } @Override
codereview_new_java_data_8349
private Optional<Exception> isValid(final Map<TopicPartition, OffsetAndMetadata> @Override public String toString() { - return getClass() + "_" + this.offsets; } } need to refactor this sad toString method. private Optional<Exception> isValid(final Map<TopicPartition, OffsetAndMetadata> @Override public String toString() { + return "CommitApplicationEvent(" + + "offsets=" + offsets + ")"; } }
codereview_new_java_data_8350
private boolean process(final NoopApplicationEvent event) { private boolean process(final PollApplicationEvent event) { Optional<RequestManager> commitRequestManger = registry.get(RequestManager.Type.COMMIT); if (!commitRequestManger.isPresent()) { - return false; } CommitRequestManager manager = (CommitRequestManager) commitRequestManger.get(); Why return `false` here? If the user did not set `group.id` and never use auto or manual commits, then here we should just skip right? private boolean process(final NoopApplicationEvent event) { private boolean process(final PollApplicationEvent event) { Optional<RequestManager> commitRequestManger = registry.get(RequestManager.Type.COMMIT); if (!commitRequestManger.isPresent()) { + return true; } CommitRequestManager manager = (CommitRequestManager) commitRequestManger.get();
codereview_new_java_data_8351
private void produceValueRange(final int key, final int start, final int endExcl

    private Properties streamsConfiguration() {
        final Properties config = new Properties();
        config.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
-        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + this.appId);
        config.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:" + (++port));
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
        config.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());

nit:
```suggestion
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + appId);
```

private void produceValueRange(final int key, final int start, final int endExcl

    private Properties streamsConfiguration() {
        final Properties config = new Properties();
        config.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
+        config.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + appId);
        config.put(StreamsConfig.APPLICATION_SERVER_CONFIG, "localhost:" + (++port));
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
        config.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
codereview_new_java_data_8352
public void configure(Map<String, ?> configs) {

    @Override
    public void close() throws IOException {
-        ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader();
-        Thread.currentThread().setContextClassLoader(rsmClassLoader);
-        try {
            delegate.close();
-        } finally {
-            Thread.currentThread().setContextClassLoader(originalClassLoader);
-        }
    }

-    private <T> T withClassLoader(RemoteStorageAction<T> action) throws RemoteStorageException {
        ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader();
        Thread.currentThread().setContextClassLoader(rsmClassLoader);
        try {

Perhaps a better option would be an interface like:
```java
interface ClassLoaderAction<T, E extends Exception> {
    T execute() throws E;
}
```
Then you can use it from `close` too.

public void configure(Map<String, ?> configs) {

    @Override
    public void close() throws IOException {
+        withClassLoader(() -> {
            delegate.close();
+            return null;
+        });
    }

+    private <T, E extends Exception> T withClassLoader(ClassLoaderAction<T, E> action) throws E {
        ClassLoader originalClassLoader = Thread.currentThread().getContextClassLoader();
        Thread.currentThread().setContextClassLoader(rsmClassLoader);
        try {
codereview_new_java_data_8353
public class TimeIndex extends AbstractIndex {

    private volatile TimestampOffset lastEntry;

-    public TimeIndex(File file, long baseOffset) throws IOException {
-        this(file, baseOffset, -1);
-    }
-
    public TimeIndex(File file, long baseOffset, int maxIndexSize) throws IOException {
        this(file, baseOffset, maxIndexSize, true);
    }

This constructor seems unused?

public class TimeIndex extends AbstractIndex {

    private volatile TimestampOffset lastEntry;

    public TimeIndex(File file, long baseOffset, int maxIndexSize) throws IOException {
        this(file, baseOffset, maxIndexSize, true);
    }
codereview_new_java_data_8355
public class MigrationControlManager {

        zkMigrationState = new TimelineObject<>(snapshotRegistry, ZkMigrationState.NONE);
    }

-    public ZkMigrationState zkMigrationState() {
        return zkMigrationState.get();
    }

does this need to be a public function or can it be package-private

public class MigrationControlManager {

        zkMigrationState = new TimelineObject<>(snapshotRegistry, ZkMigrationState.NONE);
    }

+    ZkMigrationState zkMigrationState() {
        return zkMigrationState.get();
    }
codereview_new_java_data_8358
 * topic partition from offset 0 up to but not including the end offset in the snapshot
 * id.
 *
- * @see org.apache.kafka.raft.KafkaRaftClient#createSnapshot(OffsetAndEpoch, long)
 */
public interface SnapshotWriter<T> extends AutoCloseable {
    /**

Maybe this should point to `RaftClient.createSnapshot` since that is what we documented.
```
 * @see org.apache.kafka.raft.RaftClient#createSnapshot(OffsetAndEpoch, long)
```

 * topic partition from offset 0 up to but not including the end offset in the snapshot
 * id.
 *
+ * @see org.apache.kafka.raft.RaftClient#createSnapshot(OffsetAndEpoch, long)
 */
public interface SnapshotWriter<T> extends AutoCloseable {
    /**
codereview_new_java_data_8359
public static ApiVersionsResponse createApiVersionsResponse(
    }

    public static ApiVersionCollection filterApis(
-        Set<ApiKeys> enabledApi,
        RecordVersion minRecordVersion
    ) {
        ApiVersionCollection apiKeys = new ApiVersionCollection();
-        for (ApiKeys apiKey : enabledApi) {
            if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
                apiKeys.add(ApiVersionsResponse.toApiVersion(apiKey));
            }

nit: Do we want to make this plural `enabledApis`

public static ApiVersionsResponse createApiVersionsResponse(
    }

    public static ApiVersionCollection filterApis(
+        Set<ApiKeys> enabledApis,
        RecordVersion minRecordVersion
    ) {
        ApiVersionCollection apiKeys = new ApiVersionCollection();
+        for (ApiKeys apiKey : enabledApis) {
            if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
                apiKeys.add(ApiVersionsResponse.toApiVersion(apiKey));
            }
codereview_new_java_data_8360
public enum Errors {
    OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new),
    FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoins.", FencedMemberEpochException::new),
    UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. That member must leave first.", UnreleasedInstanceIdException::new),
-    UNSUPPORTED_ASSIGNOR(112, "The assignor used by the member or its version range are not supported by the consumer group.", UnsupportedAssignorException::new);

    private static final Logger log = LoggerFactory.getLogger(Errors.class);

nit: is not how's "The selected assignor or its version range is not supported by the consumer group."?

public enum Errors {
    OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new),
    FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoins.", FencedMemberEpochException::new),
    UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. That member must leave first.", UnreleasedInstanceIdException::new),
+    UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new);

    private static final Logger log = LoggerFactory.getLogger(Errors.class);
codereview_new_java_data_8361
 */
package org.apache.kafka.common.errors;

public class FencedMemberEpochException extends ApiException {
    public FencedMemberEpochException(String message) {
        super(message);

We haven't done it for the other exception types, but I wonder if it makes sense to add the `@InterfaceStability.Evolving` to these types since they are public.

 */
package org.apache.kafka.common.errors;

+import org.apache.kafka.common.annotation.InterfaceStability;
+
+@InterfaceStability.Evolving
public class FencedMemberEpochException extends ApiException {
    public FencedMemberEpochException(String message) {
        super(message);
codereview_new_java_data_8362
public void shouldThrowIllegalArgumentExceptionWhenCustomPartitionerReturnsMulti

        for (final KafkaStreams stream: kafkaStreamsList) {
            stream.setUncaughtExceptionHandler(e -> {
-                Assertions.assertEquals("The partitions returned by StreamPartitioner#partitions method when used for FK join should be a singleton set", e.getCause().getMessage());
                return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
            });
        }

We've been moving to `assertThat` instead of `assertEquals` and co:
```suggestion
                assertThat(e.getCause().getMessage(), equalTo("The partitions returned by StreamPartitioner#partitions method when used for FK join should be a singleton set"));
```

public void shouldThrowIllegalArgumentExceptionWhenCustomPartitionerReturnsMulti

        for (final KafkaStreams stream: kafkaStreamsList) {
            stream.setUncaughtExceptionHandler(e -> {
+                assertThat(e.getCause().getMessage(), equalTo("The partitions returned by StreamPartitioner#partitions method when used for FK join should be a singleton set"));
                return StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
            });
        }
codereview_new_java_data_8365
public void shouldEnablePartitionAutoscaling() {

    public void shouldSetPartitionAutoscalingTimeout() {
        props.put("partition.autoscaling.timeout.ms", 0L);
        final StreamsConfig config = new StreamsConfig(props);
-        assertThat(config.getBoolean(PARTITION_AUTOSCALING_TIMEOUT_MS_CONFIG), is(0L));
    }

    static class MisconfiguredSerde implements Serde<Object> {

oops! `config.getLong` here

public void shouldEnablePartitionAutoscaling() {

    public void shouldSetPartitionAutoscalingTimeout() {
        props.put("partition.autoscaling.timeout.ms", 0L);
        final StreamsConfig config = new StreamsConfig(props);
+        assertThat(config.getLong(PARTITION_AUTOSCALING_TIMEOUT_MS_CONFIG), is(0L));
    }

    static class MisconfiguredSerde implements Serde<Object> {
codereview_new_java_data_8368
protected AbstractControlRequest(ApiKeys api, short version) {

    public abstract int controllerId();

-    public abstract int kraftControllerId();

    public abstract int controllerEpoch();

I didn't see how is this field used in `KafkaApis`.

protected AbstractControlRequest(ApiKeys api, short version) {

    public abstract int controllerId();

+    public abstract boolean isKRaftController();

    public abstract int controllerEpoch();
codereview_new_java_data_8369
public boolean isNoOpRecordSupported() {
        return this.isAtLeast(IBP_3_3_IV1);
    }

-    public boolean isApiForwardingSupported() {
        return this.isAtLeast(IBP_3_4_IV0);
    }

nit: Should we say "isApiForwardingEnabled" since we require API forwarding at this version?

public boolean isNoOpRecordSupported() {
        return this.isAtLeast(IBP_3_3_IV1);
    }

+    public boolean isApiForwardingEnabled() {
        return this.isAtLeast(IBP_3_4_IV0);
    }
codereview_new_java_data_8371
public LeaderAndIsrRequest build(short version) {
            ).collect(Collectors.toList());

        LeaderAndIsrRequestData data = new LeaderAndIsrRequestData()
            .setControllerEpoch(controllerEpoch)
            .setBrokerEpoch(brokerEpoch)
            .setLiveLeaders(leaders);
-        if (kraftController) {
-            data.setKRaftControllerId(controllerId).setControllerId(-1);
-        } else {
-            data.setControllerId(controllerId).setKRaftControllerId(-1);
-        }

        if (version >= 2) {
            Map<String, LeaderAndIsrTopicState> topicStatesMap = groupByTopic(partitionStates, topicIds);

Maybe this is just me, but this if block seems confusing can we do something like
```
            .setControllerId(kraftController ? -1 : controllerId)
            .setKRaftControllerId(kraftController ? controllerId : -1)
```

public LeaderAndIsrRequest build(short version) {
            ).collect(Collectors.toList());

        LeaderAndIsrRequestData data = new LeaderAndIsrRequestData()
+            .setControllerId(kraftController ? -1 : controllerId)
+            .setKRaftControllerId(kraftController ? controllerId : -1)
            .setControllerEpoch(controllerEpoch)
            .setBrokerEpoch(brokerEpoch)
            .setLiveLeaders(leaders);

        if (version >= 2) {
            Map<String, LeaderAndIsrTopicState> topicStatesMap = groupByTopic(partitionStates, topicIds);
codereview_new_java_data_8372
public static class Builder extends AbstractControlRequest.Builder<LeaderAndIsrR

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<LeaderAndIsrPartitionState> partitionStates, Map<String, Uuid> topicIds,
                       Collection<Node> liveLeaders) {
-            super(ApiKeys.LEADER_AND_ISR, version, controllerId, controllerEpoch, brokerEpoch, false);
-            this.partitionStates = partitionStates;
-            this.topicIds = topicIds;
-            this.liveLeaders = liveLeaders;
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,

let's define this in terms of the other constructor to avoid duplicating the below code. something like:
```
this(version, controllerId, controllerEpoch, brokerEpoch, false)
```

public static class Builder extends AbstractControlRequest.Builder<LeaderAndIsrR

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<LeaderAndIsrPartitionState> partitionStates, Map<String, Uuid> topicIds,
                       Collection<Node> liveLeaders) {
+            this(version, controllerId, controllerEpoch, brokerEpoch, partitionStates, topicIds,
+                liveLeaders, false);
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
codereview_new_java_data_8373
public static class Builder extends AbstractControlRequest.Builder<StopReplicaRe

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       boolean deletePartitions, List<StopReplicaTopicState> topicStates) {
-            super(ApiKeys.STOP_REPLICA, version, controllerId, controllerEpoch, brokerEpoch, false);
-            this.deletePartitions = deletePartitions;
-            this.topicStates = topicStates;
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,

this is another case where we should define this in terms of the other constructor to avoid duplication
```
this(version, controllerId, controllerEpoch, brokerEpoch, deletePartitions, topicStates, false)
```

public static class Builder extends AbstractControlRequest.Builder<StopReplicaRe

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       boolean deletePartitions, List<StopReplicaTopicState> topicStates) {
+            this(version, controllerId, controllerEpoch, brokerEpoch, deletePartitions,
+                topicStates, false);
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
codereview_new_java_data_8374
public static class Builder extends AbstractControlRequest.Builder<UpdateMetadat

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<UpdateMetadataPartitionState> partitionStates, List<UpdateMetadataBroker> liveBrokers,
                       Map<String, Uuid> topicIds) {
-            super(ApiKeys.UPDATE_METADATA, version, controllerId, controllerEpoch, brokerEpoch, false);
-            this.partitionStates = partitionStates;
-            this.liveBrokers = liveBrokers;
-            this.topicIds = topicIds;
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,

same, define this in terms of the other ctor

public static class Builder extends AbstractControlRequest.Builder<UpdateMetadat

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<UpdateMetadataPartitionState> partitionStates, List<UpdateMetadataBroker> liveBrokers,
                       Map<String, Uuid> topicIds) {
+            this(version, controllerId, controllerEpoch, brokerEpoch, partitionStates,
+                liveBrokers, topicIds, false);
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
codereview_new_java_data_8376
public void shouldDeleteKeyAndPropagateV0() {

            .withValue(new Change<>(newValue, oldValue)),
            forwarded.get(0).record()
        );
-
-        stateStore.close();
    }

    @Test

Why do you close the state store in the test here but not in the other tests?

public void shouldDeleteKeyAndPropagateV0() {

            .withValue(new Change<>(newValue, oldValue)),
            forwarded.get(0).record()
        );
    }

    @Test
codereview_new_java_data_8379
/**
 * <p>
 * Command line utility that runs Kafka Connect as a standalone process. In this mode, work (connectors and tasks) is not
- * distributed. Instead, all the normal Connect machinery works within a single process. This is useful for for development
- * and testing Kafka Connect on a local machine.
 * </p>
 */
public class ConnectStandalone extends AbstractConnectCli<StandaloneConfig> {

I'd prefer to keep the existing wording of "ad hoc, small, or experimental jobs". I know it's generally recommended to run Connect in distributed mode but standalone mode is still a legitimate part of the project and we make that clear in [our docs](https://kafka.apache.org/33/documentation.html#connect_running):

> In standalone mode all work is performed in a single process. This configuration is simpler to setup and get started with and may be useful in situations where only one worker makes sense (e.g. collecting log files), but it does not benefit from some of the features of Kafka Connect such as fault tolerance.

/**
 * <p>
 * Command line utility that runs Kafka Connect as a standalone process. In this mode, work (connectors and tasks) is not
+ * distributed. Instead, all the normal Connect machinery works within a single process. This is useful for ad hoc,
+ * small, or experimental jobs.
+ * </p>
+ * <p>
+ * Connector and task configs are stored in memory and are not persistent. However, connector offset data is persistent
+ * since it uses file storage (configurable via {@link StandaloneConfig#OFFSET_STORAGE_FILE_FILENAME_CONFIG})
 * </p>
 */
public class ConnectStandalone extends AbstractConnectCli<StandaloneConfig> {
codereview_new_java_data_8380
protected void processExtraArgs(Herder herder, Connect connect, String[] extraAr
                cb.get();
            }
        } catch (Throwable t) {
-            log.error("Stopping Connect due to an error while attempting to create a connector", t);
            connect.stop();
            Exit.exit(3);
        }

Can we please keep [the existing wording](https://github.com/apache/kafka/blob/de088a2e9758e36efe60b1d8acb18b4881b5a9fc/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java#L121) for this error message?

protected void processExtraArgs(Herder herder, Connect connect, String[] extraAr
                cb.get();
            }
        } catch (Throwable t) {
+            log.error("Stopping after connector error", t);
            connect.stop();
            Exit.exit(3);
        }
codereview_new_java_data_8382
public String toString() {
            + "To enable exactly-once source support on a new cluster, set this property to '" + ExactlyOnceSourceSupport.ENABLED + "'. "
            + "To enable support on an existing cluster, first set to '" + ExactlyOnceSourceSupport.PREPARING + "' on every worker in the cluster, "
            + "then set to '" + ExactlyOnceSourceSupport.ENABLED + "'. A rolling upgrade may be used for both changes. "
-            + "See the <a href=\"https://kafka.apache.org/documentation.html#connect_exactlyoncesource\">exactly-once source support documentation</a> "
-            + "for more information on this feature.";

    public static final String EXACTLY_ONCE_SOURCE_SUPPORT_DEFAULT = ExactlyOnceSourceSupport.DISABLED.toString();

    private static Object defaultKeyGenerationAlgorithm(Crypto crypto) {

Recommend reordering this sentence to: "For more information on this feature, see the <a href..."

public String toString() {
            + "To enable exactly-once source support on a new cluster, set this property to '" + ExactlyOnceSourceSupport.ENABLED + "'. "
            + "To enable support on an existing cluster, first set to '" + ExactlyOnceSourceSupport.PREPARING + "' on every worker in the cluster, "
            + "then set to '" + ExactlyOnceSourceSupport.ENABLED + "'. A rolling upgrade may be used for both changes. "
+            + "For more information on this feature, see the "
+            + "<a href=\"https://kafka.apache.org/documentation.html#connect_exactlyoncesource\">exactly-once source support documentation</a>.";

    public static final String EXACTLY_ONCE_SOURCE_SUPPORT_DEFAULT = ExactlyOnceSourceSupport.DISABLED.toString();

    private static Object defaultKeyGenerationAlgorithm(Crypto crypto) {
codereview_new_java_data_8383
 * limitations under the License.
 */

/**
- * Provides pluggable interfaces for connector security policies.
 */
package org.apache.kafka.connect.connector.policy;
\ No newline at end of file

Maybe worth including an example that calls out the only interface currently in this package? Possibly something like this:
```suggestion
 * Provides pluggable interfaces for limiting how users can modify and configure connectors. For example, the
 * {@link org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy ConnectorClientConfigOverridePolicy}
 * interface can be used to control which Kafka client properties can be overridden on a per-connector basis.
```

 * limitations under the License.
 */

/**
+ * Provides pluggable interfaces for policies controlling how users can configure connectors.
+ * For example, the
+ * {@link org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy ConnectorClientConfigOverridePolicy}
+ * interface can be used to control which Kafka client properties can be overridden on a per-connector basis.
 */
package org.apache.kafka.connect.connector.policy;
\ No newline at end of file
codereview_new_java_data_8384
 * limitations under the License.
 */

/**
- * Provides common exception classes for Connect.
 */
package org.apache.kafka.connect.errors;
\ No newline at end of file

We should make it clear that these exception classes establish a two-way API, between connectors/tasks/etc. and the framework:
```suggestion
 * Provides common exception classes for Connect.
 * Some may be thrown by the Connect runtime if connectors, tasks, or other components perform invalid or failed operations,
 * and others may be thrown by connectors, tasks, or other components to signal failures to the Connect runtime.
```

 * limitations under the License.
 */

/**
+ * Provides common exception classes for Connect, used by the framework and plugins to communicate failures.
 */
package org.apache.kafka.connect.errors;
\ No newline at end of file
codereview_new_java_data_8385
 * limitations under the License.
 */

/**
- * Provides interface for describing the state of a running Connect cluster.
 */
package org.apache.kafka.connect.health;
\ No newline at end of file

We should be clear about who is going to be describing the cluster, and how this information can be used:
```suggestion
 * Provides an API for describing the state of a running Connect cluster.
 * The Connect runtime will instantiate members of this package and expose them to
 * components whose API gives them access to cluster information.
 * <p/>
 * For example, {@link org.apache.kafka.connect.rest.ConnectRestExtension REST extensions}
 * can use their {@link org.apache.kafka.connect.rest.ConnectRestExtensionContext#clusterState() context}
 * to learn about the state of the cluster.
```

 * limitations under the License.
 */

/**
+ * Provides an API for describing the state of a running Connect cluster to
+ * {@link org.apache.kafka.connect.rest.ConnectRestExtension} instances.
 */
package org.apache.kafka.connect.health;
\ No newline at end of file
codereview_new_java_data_8386
 * limitations under the License.
 */

/**
- * Provides API for implementing connectors which write Kafka records to external applications.
 */
package org.apache.kafka.connect.sink;
\ No newline at end of file

Nits:
```suggestion
 * Provides an API for implementing connectors which write Kafka records to external applications,
 * also known as <i>sink connectors</i>.
```

 * limitations under the License.
 */

/**
+ * Provides an API for implementing sink connectors which write Kafka records to external applications.
 */
package org.apache.kafka.connect.sink;
\ No newline at end of file
codereview_new_java_data_8387
 * limitations under the License.
 */

/**
- * Provides pluggable interface for altering data which is being moved by Connect.
 */
package org.apache.kafka.connect.transforms;
\ No newline at end of file

Nit:
```suggestion
 * Provides a pluggable interface for altering data which is being moved by Connect.
```

 * limitations under the License.
 */

/**
+ * Provides a pluggable interface for altering data which is being moved by Connect.
 */
package org.apache.kafka.connect.transforms;
\ No newline at end of file
codereview_new_java_data_8388
 * limitations under the License.
 */

/**
- * Provides pluggable interface for describing when a {@link org.apache.kafka.connect.transforms.Transformation} should be applied to a record.
 */
package org.apache.kafka.connect.transforms.predicates;
\ No newline at end of file

Nit:
```suggestion
 * Provides a pluggable interface for describing when a {@link org.apache.kafka.connect.transforms.Transformation} should be applied to a record.
```

 * limitations under the License.
 */

/**
+ * Provides a pluggable interface for describing when a {@link org.apache.kafka.connect.transforms.Transformation} should be applied to a record.
 */
package org.apache.kafka.connect.transforms.predicates;
\ No newline at end of file
codereview_new_java_data_8389
 * limitations under the License.
 */

/**
- * Provides low-level abstractions of streaming data and computation over that streaming data.
 */
package org.apache.kafka.streams.processor;
\ No newline at end of file

Same as for `processer.api`

 * limitations under the License.
 */

/**
+ * Provides a low-level programming model (Processor API, aka, PAPI) to express a (stateful) data flow computation over input topics.
+ * Use {@link org.apache.kafka.streams.Topology} as the entry point for your program.
 */
package org.apache.kafka.streams.processor;
\ No newline at end of file
codereview_new_java_data_8390
 * limitations under the License.
 */

/**
- * Provides API for extracting data from a Streams application.
 */
package org.apache.kafka.streams.query;
\ No newline at end of file

What about this:
```
Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
```

 * limitations under the License.
 */

/**
+ * Provides a query API (aka Interactive Queries) over state stores, for extracting data from a stateful Kafka Streams application.
 */
package org.apache.kafka.streams.query;
\ No newline at end of file
codereview_new_java_data_8391
 * limitations under the License.
 */

/**
- * Provides classes for testing Streams applications with mocked inputs.
 */
package org.apache.kafka.streams.test;
\ No newline at end of file

```suggestion
 * Provides classes for testing Kafka Streams applications with mocked inputs.
```

 * limitations under the License.
 */

/**
+ * Provides classes for testing Kafka Streams applications with mocked inputs.
 */
package org.apache.kafka.streams.test;
\ No newline at end of file
codereview_new_java_data_8393
public class StreamsConfig extends AbstractConfig {

    @SuppressWarnings("WeakerAccess")
    public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG;
    private static final String CLIENT_ID_DOC = "An ID prefix string used for the client IDs of internal consumer, producer and restore-consumer," +
-        " with pattern "<code>" + <client.id>-StreamThread-<threadSequenceNumber>-<consumer|producer|restore-consumer> + "</code>.";

    /** {@code commit.interval.ms} */
    @SuppressWarnings("WeakerAccess")

It should be something like:
```java
" with pattern <code>&lt;client.id&gt;-StreamThread-&lt;threadSequenceNumber$gt;-&lt;consumer|producer|restore-consumer&gt;</code>.";
```

public class StreamsConfig extends AbstractConfig {

    @SuppressWarnings("WeakerAccess")
    public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG;
    private static final String CLIENT_ID_DOC = "An ID prefix string used for the client IDs of internal consumer, producer and restore-consumer," +
+        " with pattern <code>&lt;client.id&gt;-StreamThread-&lt;threadSequenceNumber$gt;-&lt;consumer|producer|restore-consumer&gt;</code>.";

    /** {@code commit.interval.ms} */
    @SuppressWarnings("WeakerAccess")
codereview_new_java_data_8394
public static VerificationResult verify(final String kafka,
        return verificationResult;
    }

-    private static Map<String, Set<Number>> parseRecordsForEchoTopic(final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events) {
        return events.containsKey("echo")
            ? events.get("echo")
            .entrySet()

nit: This is along expression, I'd prefer to write this with an if-construct, but I won't insist.

public static VerificationResult verify(final String kafka,
        return verificationResult;
    }

+    private static Map<String, Set<Number>> parseRecordsForEchoTopic(
+        final Map<String, Map<String, LinkedList<ConsumerRecord<String, Number>>>> events) {
        return events.containsKey("echo")
            ? events.get("echo")
            .entrySet()
codereview_new_java_data_8396
 * limitations under the License.
 */

/**
- * Kafka Client for producing events to a Kafka Cluster
 */
package org.apache.kafka.clients.producer;
\ No newline at end of file

```suggestion
 * Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
```

 * limitations under the License.
 */

/**
+ * Provides a Kafka client for producing records to topics and/or partitions in a Kafka cluster.
 */
package org.apache.kafka.clients.producer;
\ No newline at end of file
codereview_new_java_data_8397
 * limitations under the License.
 */

/**
- * Access Control List API for Authorization of Kafka Clients
 */
package org.apache.kafka.common.acl;
\ No newline at end of file

```suggestion
 * Provides classes representing Access Control Lists for authorization of clients.
```

 * limitations under the License.
 */

/**
+ * Provides classes representing Access Control Lists for authorization of clients
 */
package org.apache.kafka.common.acl;
\ No newline at end of file
codereview_new_java_data_8398
 * limitations under the License.
 */

/**
- * Annotations for describing properties of Kafka API Classes
 */
package org.apache.kafka.common.annotation;
\ No newline at end of file

```suggestion
 * Provides annotations used on Kafka APIs.
```

 * limitations under the License.
 */

/**
+ * Provides annotations used on Kafka APIs.
 */
package org.apache.kafka.common.annotation;
\ No newline at end of file